forked from SW/traefik
Compare commits
16 Commits
| Author | SHA1 | Date |
|---|---|---|
| | bf650d7623 | |
| | 0528af1e66 | |
| | c309b3b006 | |
| | e9d3c9a1cb | |
| | 8cfb429dec | |
| | e1418fa622 | |
| | d3afb20890 | |
| | df6aab811d | |
| | 8d76f52b85 | |
| | a0d8ee5a02 | |
| | bc8d36a68e | |
| | e73fd70add | |
| | c47849d43a | |
| | 2921e1059a | |
| | fe8e539422 | |
| | 45f0591cf6 | |
@@ -1,3 +1,5 @@
dist/
!dist/traefik
site/
vendor/
/traefik
.github/workflows/build.yaml (new file, vendored, 95 lines)
@@ -0,0 +1,95 @@
name: Build Binaries

on:
  pull_request:
    branches:
      - '*'

env:
  GO_VERSION: 1.16
  CGO_ENABLED: 0
  PRE_TARGET: ""

jobs:

  build-webui:
    runs-on: ubuntu-20.04

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Build webui
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: |
          make generate-webui
          tar czvf webui.tar.gz ./static/

      - name: Artifact webui
        uses: actions/upload-artifact@v2
        with:
          name: webui.tar.gz
          path: webui.tar.gz

  build:
    runs-on: ubuntu-20.04
    needs:
      - build-webui
    defaults:
      run:
        working-directory: ${{ github.workspace }}/go/src/github.com/traefik/traefik

    steps:
      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Check out code
        uses: actions/checkout@v2
        with:
          path: go/src/github.com/traefik/traefik
          fetch-depth: 0

      - name: Cache Go modules
        uses: actions/cache@v2
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
            ~/Library/Caches/go-build
            '%LocalAppData%\go-build'
          key: ${{ runner.os }}-build-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-build-go-

      - name: Install gobindata
        run: |
          curl -fsSL -o $(go env GOPATH)/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata
          chmod +x $(go env GOPATH)/bin/go-bindata

      - name: Artifact webui
        uses: actions/download-artifact@v2
        with:
          name: webui.tar.gz
          path: ${{ github.workspace }}/go/src/github.com/traefik/traefik

      - name: Untar webui
        run: tar xvf webui.tar.gz

      - name: Build for darwin
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: GOOS=darwin GOARCH=amd64 make binary

      - name: Build for linux
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: GOOS=linux GOARCH=amd64 make binary

      - name: Build for windows
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: GOOS=windows GOARCH=amd64 make binary
.github/workflows/check_doc.yaml (new file, vendored, 21 lines)
@@ -0,0 +1,21 @@
name: Check Documentation

on:
  pull_request:
    branches:
      - '*'

jobs:

  docs:
    name: Check, verify and build documentation
    runs-on: ubuntu-20.04

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Check documentation
        run: make docs-verify
.github/workflows/documentation.yaml (new file, vendored, 52 lines)
@@ -0,0 +1,52 @@
name: Build and Publish Documentation

on:
  push:
    branches:
      - master
      - v*

env:
  STRUCTOR_VERSION: v1.11.2
  MIXTUS_VERSION: v0.4.1

jobs:

  docs:
    name: Doc Process
    runs-on: ubuntu-20.04
    if: github.repository == 'traefik/traefik'

    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Install Structor ${{ env.STRUCTOR_VERSION }}
        run: curl -sSfL https://raw.githubusercontent.com/traefik/structor/master/godownloader.sh | sh -s -- -b $HOME/bin ${STRUCTOR_VERSION}

      - name: Install Seo-doc
        run: curl -sSfL https://raw.githubusercontent.com/traefik/seo-doc/master/godownloader.sh | sh -s -- -b "${HOME}/bin"

      - name: Install Mixtus ${{ env.MIXTUS_VERSION }}
        run: curl -sSfL https://raw.githubusercontent.com/traefik/mixtus/master/godownloader.sh | sh -s -- -b $HOME/bin ${MIXTUS_VERSION}

      - name: Build documentation
        run: $HOME/bin/structor -o traefik -r traefik --dockerfile-url="https://raw.githubusercontent.com/traefik/traefik/v1.7/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/traefik/structor/master/traefik-menu.js.gotmpl" --rqts-url="https://raw.githubusercontent.com/traefik/structor/master/requirements-override.txt" --force-edit-url --exp-branch=master --debug
        env:
          STRUCTOR_LATEST_TAG: ${{ secrets.STRUCTOR_LATEST_TAG }}

      - name: Apply seo
        run: $HOME/bin/seo -path=./site

      - name: Publish documentation
        run: $HOME/bin/mixtus --dst-doc-path="./traefik" --dst-owner=traefik --dst-repo-name=doc --git-user-email="30906710+traefiker@users.noreply.github.com" --git-user-name=traefiker --src-doc-path="./site" --src-owner=traefik --src-repo-name=traefik
        env:
          GITHUB_TOKEN: ${{ secrets.GH_TOKEN_REPO }}
.github/workflows/experimental.yaml (new file, vendored, 37 lines)
@@ -0,0 +1,37 @@
name: Build experimental image on branch

on:
  push:
    branches:
      - master
      - v*

jobs:

  experimental:
    if: github.repository == 'traefik/traefik'
    name: Build experimental image on branch
    runs-on: ubuntu-20.04

    steps:

      # https://github.com/marketplace/actions/checkout
      - name: Check out code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Branch name
        run: echo ${GITHUB_REF##*/}

      - name: Build docker experimental image
        run: docker build -t traefik/traefik:experimental-${GITHUB_REF##*/} -f exp.Dockerfile .

      - name: Login to Docker Hub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Push to Docker Hub
        run: docker push traefik/traefik:experimental-${GITHUB_REF##*/}
.github/workflows/test-unit.yaml (new file, vendored, 53 lines)
@@ -0,0 +1,53 @@
name: Test Unit

on:
  pull_request:
    branches:
      - '*'

env:
  GO_VERSION: 1.16
  PRE_TARGET: ""

jobs:

  test-unit:
    runs-on: ubuntu-20.04

    defaults:
      run:
        working-directory: ${{ github.workspace }}/go/src/github.com/traefik/traefik

    steps:
      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Check out code
        uses: actions/checkout@v2
        with:
          path: go/src/github.com/traefik/traefik
          fetch-depth: 0

      - name: Cache Go modules
        uses: actions/cache@v2
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-test-unit-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-test-unit-go-

      - name: Install gobindata
        run: |
          curl -fsSL -o $(go env GOPATH)/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata
          chmod +x $(go env GOPATH)/bin/go-bindata

      - name: Avoid generating webui
        run: mkdir -p webui/static && touch webui/static/index.html

      - name: Tests
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: make test-unit
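The unit-test job can also be reproduced outside the workflow; a minimal local sketch, assuming a Go 1.16 toolchain and go-bindata installed as in the steps above:

```bash
# Stub out the webui so `make test-unit` does not need a Node build,
# then run the tests directly on the host instead of inside the dev container.
mkdir -p webui/static && touch webui/static/index.html
DOCKER_RUN_TRAEFIK="" make test-unit
```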
.github/workflows/validate.yaml (new file, vendored, 102 lines)
@@ -0,0 +1,102 @@
name: Validate

on:
  pull_request:
    branches:
      - '*'

env:
  GO_VERSION: 1.16
  MISSSPELL_VERSION: v0.3.4
  PRE_TARGET: ""

jobs:

  validate:
    runs-on: ubuntu-20.04

    defaults:
      run:
        working-directory: ${{ github.workspace }}/go/src/github.com/traefik/traefik

    steps:
      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Check out code
        uses: actions/checkout@v2
        with:
          path: go/src/github.com/traefik/traefik
          fetch-depth: 0

      - name: Cache Go modules
        uses: actions/cache@v2
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-validate-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-validate-go-

      - name: Install golint
        run: go install golang.org/x/lint/golint@latest

      - name: Install missspell ${{ env.MISSSPELL_VERSION }}
        run: curl -sfL https://raw.githubusercontent.com/client9/misspell/master/install-misspell.sh | sh -s -- -b $(go env GOPATH)/bin ${MISSSPELL_VERSION}

      - name: Install gobindata
        run: |
          curl -fsSL -o $(go env GOPATH)/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata
          chmod +x $(go env GOPATH)/bin/go-bindata

      - name: Avoid generating webui
        run: mkdir -p webui/static && touch webui/static/index.html

      - name: Validate
        env:
          DOCKER_RUN_TRAEFIK: ""
        run: make validate

  validate-generate:
    runs-on: ubuntu-20.04

    defaults:
      run:
        working-directory: ${{ github.workspace }}/go/src/github.com/traefik/traefik

    steps:
      - name: Set up Go ${{ env.GO_VERSION }}
        uses: actions/setup-go@v2
        with:
          go-version: ${{ env.GO_VERSION }}

      - name: Check out code
        uses: actions/checkout@v2
        with:
          path: go/src/github.com/traefik/traefik
          fetch-depth: 0

      - name: Cache Go modules
        uses: actions/cache@v2
        with:
          path: |
            ~/go/pkg/mod
            ~/.cache/go-build
          key: ${{ runner.os }}-validate-generate-go-${{ hashFiles('**/go.sum') }}
          restore-keys: ${{ runner.os }}-validate-generate-go-

      - name: Install gobindata
        run: |
          curl -fsSL -o $(go env GOPATH)/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata
          chmod +x $(go env GOPATH)/bin/go-bindata

      - name: go generate
        run: |
          go generate
          git diff --exit-code
      - name: go mod tidy
        run: |
          go mod tidy
          git diff --exit-code
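Both validation jobs can be run locally with the same commands they invoke; a minimal sketch, assuming the same Go toolchain and go-bindata setup as above:

```bash
# Lint, spelling, vendoring and autogen checks, run on the host like the workflow does.
DOCKER_RUN_TRAEFIK="" make validate

# Fail if `go generate` or `go mod tidy` would change the committed files.
go generate && git diff --exit-code
go mod tidy && git diff --exit-code
```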
.gitignore (vendored, 4 changes)
@@ -10,8 +10,12 @@
/site/
/docs/site/
/traefik.toml
/traefik.yml
/dist
/traefik
*.log
*.exe
cover.out
vendor/
plugins-storage/
traefik_changelog.md
@@ -1,42 +0,0 @@
{
  "Vendor": true,
  "Sort": [
    "path",
    "line",
    "column",
    "severity",
    "linter"
  ],
  "Test": true,
  "Cyclo": 15,
  "Enable": [
    "gotypex",
    "nakedret",
    "vet",
    "goimports",
    "golint",
    "ineffassign",
    "gotype",
    "misspell",
    "structcheck",
    "gosimple",
    "unconvert",
    "varcheck",
    "errcheck",
    "unused",
    "deadcode",
    "staticcheck"
  ],
  "Disable": [
    "gas",
    "maligned",
    "interfacer",
    "goconst",
    "gocyclo",
    "vetshadow"
  ],
  "Exclude": [
    "autogen/.*"
  ],
  "Deadline": "5m"
}
.semaphore/semaphore.yml (new file, 106 lines)
@@ -0,0 +1,106 @@
version: v1.0
name: Traefik
agent:
  machine:
    type: e1-standard-4
    os_image: ubuntu1804

fail_fast:
  stop:
    when: "branch != 'master'"

auto_cancel:
  queued:
    when: "branch != 'master'"
  running:
    when: "branch != 'master'"

global_job_config:
  prologue:
    commands:
      - curl -sSfL https://raw.githubusercontent.com/ldez/semgo/master/godownloader.sh | sudo sh -s -- -b "/usr/local/bin"
      - sudo semgo go1.16
      - export "GOPATH=$(go env GOPATH)"
      - export "GOROOT=$(go env GOROOT)"
      - export "SEMAPHORE_GIT_DIR=${GOPATH}/src/github.com/traefik/${SEMAPHORE_PROJECT_NAME}"
      - export "PATH=${GOPATH}/bin:${GOROOT}/bin:${PATH}"
      - mkdir -vp "${SEMAPHORE_GIT_DIR}" "${GOPATH}/bin"
      - curl -fsSL -o ${GOPATH}/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata
      - chmod +x ${GOPATH}/bin/go-bindata
      - export GOPROXY=https://proxy.golang.org,direct
      - checkout
      - cache restore traefik-$(checksum go.sum)

blocks:
  - name: Test Integration Container
    dependencies: []
    run:
      when: "branch =~ '.*' OR pull_request =~'.*'"
    task:
      env_vars:
        - name: DOCKER_RUN_TRAEFIK
          value: ""
        - name: TEST_CONTAINER
          value: "1"
      jobs:
        - name: Test Integration Container
          commands:
            - make pull-images
            - mkdir -p webui/static && touch webui/static/index.html # Avoid generating webui
            - make binary-with-no-ui
            - sudo CONTAINER=DOCKER DOCKER_RUN_TRAEFIK="" TEST_CONTAINER=1 make test-integration-container
            - df -h
      epilogue:
        always:
          commands:
            - cache store traefik-$(checksum go.sum) $HOME/go/pkg/mod

  - name: Test Integration Host
    dependencies: []
    run:
      when: "branch =~ '.*' OR pull_request =~'.*'"
    task:
      env_vars:
        - name: DOCKER_RUN_TRAEFIK
          value: ""
      jobs:
        - name: Test Integration Host
          commands:
            - mkdir -p webui/static && touch webui/static/index.html # Avoid generating webui
            - make binary-with-no-ui
            - sudo DOCKER_RUN_TRAEFIK="" TEST_HOST=1 make test-integration-host
      epilogue:
        always:
          commands:
            - cache store traefik-$(checksum go.sum) $HOME/go/pkg/mod

  - name: Release
    dependencies: []
    run:
      when: "tag =~ '.*'"
    task:
      agent:
        machine:
          type: e1-standard-8
          os_image: ubuntu1804
      secrets:
        - name: traefik
      env_vars:
        - name: GH_VERSION
          value: 1.12.1
        - name: CODENAME
          value: "maroilles"
        - name: DOCKER_RUN_TRAEFIK
          value: ""
      prologue:
        commands:
          - export VERSION=${SEMAPHORE_GIT_TAG_NAME}
          - curl -sSL -o /tmp/gh_${GH_VERSION}_linux_amd64.tar.gz https://github.com/cli/cli/releases/download/v${GH_VERSION}/gh_${GH_VERSION}_linux_amd64.tar.gz
          - tar -zxvf /tmp/gh_${GH_VERSION}_linux_amd64.tar.gz -C /tmp
          - sudo mv /tmp/gh_${GH_VERSION}_linux_amd64/bin/gh /usr/local/bin/gh
      jobs:
        - name: Release
          commands:
            - make crossbinary-parallel
            - gh release create ${SEMAPHORE_GIT_TAG_NAME} ./dist/traefik* --repo traefik/traefik --title ${SEMAPHORE_GIT_TAG_NAME} --notes ${SEMAPHORE_GIT_TAG_NAME}
            - ./script/deploy.sh
@@ -1,4 +0,0 @@
#!/usr/bin/env bash
set -e

sudo rm -rf static
@@ -1,20 +0,0 @@
#!/usr/bin/env bash

set -e

curl -O https://dl.google.com/go/go1.14.linux-amd64.tar.gz

tar -xvf go1.14.linux-amd64.tar.gz
rm -rf go1.14.linux-amd64.tar.gz

sudo mkdir -p /usr/local/golang/1.14/go
sudo mv go /usr/local/golang/1.14/

sudo rm /usr/local/bin/go
sudo chmod +x /usr/local/golang/1.14/go/bin/go
sudo ln -s /usr/local/golang/1.14/go/bin/go /usr/local/bin/go

export GOROOT="/usr/local/golang/1.14/go"
export GOTOOLDIR="/usr/local/golang/1.14/go/pkg/tool/linux_amd64"

go version
@@ -1,6 +0,0 @@
#!/usr/bin/env bash
set -e

if [ -n "$SHOULD_TEST" ]; then ci_retry make pull-images; fi

if [ -n "$SHOULD_TEST" ]; then ci_retry make test-integration; fi
@@ -1,8 +0,0 @@
#!/usr/bin/env bash
set -e

ci_retry make validate

if [ -n "$SHOULD_TEST" ]; then ci_retry make test-unit; fi

if [ -n "$SHOULD_TEST" ]; then make -j${N_MAKE_JOBS} crossbinary-default-parallel; fi
@@ -1,16 +0,0 @@
#!/usr/bin/env bash
set -e

export DOCKER_VERSION=18.09.7

source .semaphoreci/vars

if [ -z "${PULL_REQUEST_NUMBER}" ]; then SHOULD_TEST="-*-"; else TEMP_STORAGE=$(curl --silent https://patch-diff.githubusercontent.com/raw/traefik/traefik/pull/${PULL_REQUEST_NUMBER}.diff | patch --dry-run -p1 -R); fi

if [ -n "$TEMP_STORAGE" ]; then SHOULD_TEST=$(echo "$TEMP_STORAGE" | grep -Ev '(.md|.yaml|.yml)' || :); fi

if [ -n "$SHOULD_TEST" ]; then sudo -E apt-get -yq update; fi

if [ -n "$SHOULD_TEST" ]; then sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*; fi

if [ -n "$SHOULD_TEST" ]; then docker version; fi
@@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -e

export REPO='traefik/traefik'

if VERSION=$(git describe --exact-match --abbrev=0 --tags);
then
    export VERSION
else
    export VERSION=''
fi

export CODENAME=maroilles

export N_MAKE_JOBS=2


function ci_retry {

    local NRETRY=3
    local NSLEEP=5
    local n=0

    until [ $n -ge $NRETRY ]
    do
        "$@" && break
        n=$[$n+1]
        echo "$@ failed, attempt ${n}/${NRETRY}"
        sleep $NSLEEP
    done

    [ $n -lt $NRETRY ]

}

export -f ci_retry
.travis.yml (deleted, 58 lines)
@@ -1,58 +0,0 @@
sudo: required
dist: trusty

git:
  depth: false

services:
  - docker

env:
  global:
    - REPO: $TRAVIS_REPO_SLUG
    - VERSION: $TRAVIS_TAG
    - CODENAME: maroilles
    - N_MAKE_JOBS: 2
    - DOCS_VERIFY_SKIP: true

script:
  - echo "Skipping tests... (Tests are executed on SemaphoreCI)"
  - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then make docs-verify; fi

before_deploy:
  - >
    if ! [ "$BEFORE_DEPLOY_RUN" ]; then
      export BEFORE_DEPLOY_RUN=1;
      sudo -E apt-get -yq update;
      sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*;
      docker version;
      make image;
      if [ "$TRAVIS_TAG" ]; then
        make -j${N_MAKE_JOBS} crossbinary-parallel;
        tar cfz dist/traefik-${VERSION}.src.tar.gz --exclude-vcs --exclude dist .;
      fi;
      curl -sfL https://raw.githubusercontent.com/traefik/structor/master/godownloader.sh | bash -s -- -b "${GOPATH}/bin" ${STRUCTOR_VERSION}
      curl -sSfL https://raw.githubusercontent.com/traefik/mixtus/master/godownloader.sh | sh -s -- -b "${GOPATH}/bin" ${MIXTUS_VERSION}
      structor -o traefik -r traefik --dockerfile-url="https://raw.githubusercontent.com/traefik/traefik/v1.7/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/traefik/structor/master/traefik-menu.js.gotmpl" --rqts-url="https://raw.githubusercontent.com/traefik/structor/master/requirements-override.txt" --exp-branch=master --force-edit-url --debug;
    fi
deploy:
  - provider: releases
    api_key: ${GITHUB_TOKEN}
    file: dist/traefik*
    skip_cleanup: true
    file_glob: true
    on:
      repo: traefik/traefik
      tags: true
  - provider: script
    script: sh script/deploy.sh
    skip_cleanup: true
    on:
      repo: traefik/traefik
      tags: true
  - provider: script
    script: mixtus --dst-doc-path="./traefik" --dst-owner=traefik --dst-repo-name=doc --git-user-email="30906710+traefiker@users.noreply.github.com" --git-user-name=traefiker --src-doc-path="./site" --src-owner=containous --src-repo-name=traefik
    skip_cleanup: true
    on:
      repo: traefik/traefik
      all_branches: true
CHANGELOG.md (45 changes)
@@ -1,7 +1,42 @@
# Change Log

## [v1.7.27](https://github.com/traefik/traefik/tree/v1.7.27) (2021-01-13)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.26...v1.7.27)
## [v1.7.34](https://github.com/traefik/traefik/tree/v1.7.34) (2021-12-10)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.33...v1.7.34)

**Bug fixes:**
- **[k8s]** require that secret is a valid PEM ([#8624](https://github.com/traefik/traefik/pull/8624) by [jr0d](https://github.com/jr0d))

## [v1.7.33](https://github.com/traefik/traefik/tree/v1.7.33) (2021-10-07)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.32...v1.7.33)

**Bug fixes:**
- **[acme]** acme: fix non-cluster mode ([#8501](https://github.com/traefik/traefik/pull/8501) by [ldez](https://github.com/ldez))

## [v1.7.32](https://github.com/traefik/traefik/tree/v1.7.32) (2021-10-06)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.31...v1.7.32)

**Bug fixes:**
- **[acme]** acme: add missing preferred chain ([#8495](https://github.com/traefik/traefik/pull/8495) by [ldez](https://github.com/ldez))

## [v1.7.31](https://github.com/traefik/traefik/tree/v1.7.31) (2021-10-04)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.30...v1.7.31)

**Bug fixes:**
- **[acme,k8s]** acme: add support of preferredchain in Traefik v1 ([#8482](https://github.com/traefik/traefik/pull/8482) by [ldez](https://github.com/ldez))

## [v1.7.30](https://github.com/traefik/traefik/tree/v1.7.30) (2021-04-07)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.29...v1.7.30)

**Bug fixes:**
- **[metrics]** Prometheus: fixing prom Handler to use the custom registry ([#8040](https://github.com/traefik/traefik/pull/8040) by [tomMoulard](https://github.com/tomMoulard))

## [v1.7.29](https://github.com/traefik/traefik/tree/v1.7.29) (2021-03-22)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.28...v1.7.29)

- Update to go1.16 ([#7967](https://github.com/traefik/traefik/pull/7967) by [ldez](https://github.com/ldez))

## [v1.7.28](https://github.com/traefik/traefik/tree/v1.7.28) (2021-01-13)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.26...v1.7.28)

**Bug fixes:**
- **[middleware,tracing]** Do not SetError on tracing event when whitelist allowed request ([#7174](https://github.com/traefik/traefik/pull/7174) by [LordGaav](https://github.com/LordGaav))
@@ -9,9 +44,15 @@
**Documentation:**
- **[acme]** docs: fix language "if challenge is not known" ([#7718](https://github.com/traefik/traefik/pull/7718) by [amosshapira](https://github.com/amosshapira))
- Prepare release v1.7.27 ([#7755](https://github.com/traefik/traefik/pull/7755) by [rtribotte](https://github.com/rtribotte))
- Update copyright year for 2021 ([#7753](https://github.com/traefik/traefik/pull/7753) by [rtribotte](https://github.com/rtribotte))
- Fix typo "CA's" instead of "CA:s" ([#7315](https://github.com/traefik/traefik/pull/7315) by [amosshapira](https://github.com/amosshapira))

## [v1.7.27](https://github.com/traefik/traefik/tree/v1.7.27) (2021-01-13)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.26...v1.7.27)

Release canceled due to a bad tag.

## [v1.7.26](https://github.com/traefik/traefik/tree/v1.7.26) (2020-07-28)
[All Commits](https://github.com/traefik/traefik/compare/v1.7.25...v1.7.26)
Gopkg.lock (generated, 2668 changes): file diff suppressed because it is too large.
Gopkg.toml (deleted, 278 lines)
@@ -1,278 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"

[prune]
  non-go = true
  go-tests = true
  unused-packages = true

[[constraint]]
  branch = "master"
  name = "github.com/ArthurHlt/go-eureka-client"

[[constraint]]
  branch = "master"
  name = "github.com/BurntSushi/toml"

[[constraint]]
  branch = "master"
  name = "github.com/BurntSushi/ty"

[[constraint]]
  name = "github.com/Masterminds/sprig"
  version = "2.19.0"

[[constraint]]
  branch = "master"
  name = "github.com/NYTimes/gziphandler"

[[constraint]]
  branch = "containous-fork"
  name = "github.com/abbot/go-http-auth"
  source = "github.com/containous/go-http-auth"

[[constraint]]
  name = "github.com/thoas/stats"
  # related to https://github.com/thoas/stats/pull/32
  revision = "4975baf6a358ed3ddaa42133996e1959f96c9300"

[[constraint]]
  branch = "master"
  name = "github.com/armon/go-proxyproto"

[[constraint]]
  name = "github.com/aws/aws-sdk-go"
  version = "1.13.11"

[[constraint]]
  name = "github.com/cenk/backoff"
  version = "v2.1.1"

[[constraint]]
  name = "github.com/containous/flaeg"
  version = "1.4.1"

[[constraint]]
  branch = "master"
  name = "github.com/containous/mux"

[[constraint]]
  name = "github.com/containous/staert"
  version = "3.1.1"

[[constraint]]
  name = "github.com/containous/traefik-extra-service-fabric"
  version = "1.7.0"

[[constraint]]
  name = "github.com/coreos/go-systemd"
  version = "14.0.0"

[[constraint]]
  branch = "master"
  name = "github.com/docker/leadership"
  source = "github.com/containous/leadership"

[[constraint]]
  name = "github.com/eapache/channels"
  version = "1.1.0"

[[constraint]]
  branch = "master"
  name = "github.com/elazarl/go-bindata-assetfs"

[[constraint]]
  branch = "fork-containous"
  name = "github.com/go-check/check"
  source = "github.com/containous/check"

[[override]]
  branch = "fork-containous"
  name = "github.com/go-check/check"
  source = "github.com/containous/check"

[[constraint]]
  name = "github.com/go-kit/kit"
  version = "0.7.0"

[[constraint]]
  branch = "master"
  name = "github.com/gorilla/websocket"

[[constraint]]
  name = "github.com/hashicorp/consul"
  version = "1.0.6"

[[constraint]]
  name = "github.com/influxdata/influxdb"
  version = "1.3.7"

[[constraint]]
  name = "github.com/jjcollinge/servicefabric"
  revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe"

[[constraint]]
  branch = "master"
  name = "github.com/abronan/valkeyrie"

[[constraint]]
  name = "github.com/mesosphere/mesos-dns"
  source = "https://github.com/containous/mesos-dns.git"

[[constraint]]
  name = "github.com/opentracing/opentracing-go"
  version = "1.0.2"

[[constraint]]
  branch = "containous-fork"
  name = "github.com/rancher/go-rancher-metadata"
  source = "github.com/containous/go-rancher-metadata"

[[constraint]]
  branch = "master"
  name = "github.com/ryanuber/go-glob"

[[constraint]]
  branch = "master"
  name = "github.com/stvp/go-udp-testing"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "1.2.1"

[[constraint]]
  name = "github.com/uber/jaeger-client-go"
  version = "2.15.0"

[[constraint]]
  name = "github.com/uber/jaeger-lib"
  version = "1.1.0"

[[constraint]]
  name = "github.com/unrolled/secure"
  version = "1.0.5"

[[constraint]]
  name = "github.com/vdemeester/shakers"
  version = "0.1.0"

[[constraint]]
  version = "v1.1.0"
  name = "github.com/vulcand/oxy"

[[constraint]]
  name = "github.com/go-acme/lego"
  version = "2.7.2"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "v1.3.0"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "1.5.2"

[[constraint]]
  name = "gopkg.in/fsnotify.v1"
  source = "github.com/fsnotify/fsnotify"
  version = "1.4.2"

[[constraint]]
  name = "k8s.io/client-go"
  version = "6.0.0"

[[constraint]]
  name = "k8s.io/api"
  version = "kubernetes-1.9.0"

[[constraint]]
  name = "k8s.io/apimachinery"
  version = "kubernetes-1.9.0"

[[constraint]]
  branch = "master"
  name = "github.com/libkermit/docker"

[[constraint]]
  branch = "master"
  name = "github.com/libkermit/docker-check"

[[constraint]]
  branch = "master"
  name = "github.com/libkermit/compose"

[[constraint]]
  name = "github.com/docker/docker"
  revision = "7848b8beb9d38a98a78b75f78e05f8d2255f9dfe"

[[override]]
  name = "github.com/docker/docker"
  revision = "7848b8beb9d38a98a78b75f78e05f8d2255f9dfe"

[[override]]
  name = "github.com/docker/cli"
  revision = "6b63d7b96a41055baddc3fa71f381c7f60bd5d8e"

[[override]]
  name = "github.com/docker/distribution"
  revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"

[[override]]
  branch = "master"
  name = "github.com/docker/libcompose"

[[override]]
  name = "github.com/Nvveen/Gotty"
  revision = "a8b993ba6abdb0e0c12b0125c603323a71c7790c"
  source = "github.com/ijc25/Gotty"

[[override]]
  # ALWAYS keep this override
  name = "github.com/mailgun/timetools"
  revision = "7e6055773c5137efbeb3bd2410d705fe10ab6bfd"

[[override]]
  version = "v1.1.1"
  name = "github.com/miekg/dns"

[[constraint]]
  name = "github.com/patrickmn/go-cache"
  version = "2.1.0"

[[constraint]]
  name = "gopkg.in/DataDog/dd-trace-go.v1"
  version = "1.13.0"

[[constraint]]
  name = "github.com/google/uuid"
  version = "0.2.0"

[[constraint]]
  name = "github.com/shopspring/decimal"
  revision = "f1972eb1d1f519e2e5f4b51f2dea765e8c93a130"

[[override]]
  name = "contrib.go.opencensus.io/exporter/ocagent"
  version = "0.4.12"

[[override]]
  name = "github.com/decker502/dnspod-go"
  revision = "071171b22a9b65e4f544b61c143befd54a08a64e"
Makefile (20 changes)
@@ -27,7 +27,7 @@ DOCS_VERIFY_SKIP ?= false
DOCKER_BUILD_ARGS := $(if $(DOCKER_VERSION), "--build-arg=DOCKER_VERSION=$(DOCKER_VERSION)",)
DOCKER_RUN_OPTS := $(TRAEFIK_ENVS) $(TRAEFIK_MOUNT) "$(TRAEFIK_DEV_IMAGE)"
DOCKER_RUN_TRAEFIK := docker run $(INTEGRATION_OPTS) -it $(DOCKER_RUN_OPTS)
DOCKER_RUN_TRAEFIK ?= docker run $(INTEGRATION_OPTS) -it $(DOCKER_RUN_OPTS)
DOCKER_RUN_TRAEFIK_NOTTY := docker run $(INTEGRATION_OPTS) -i $(DOCKER_RUN_OPTS)
DOCKER_RUN_DOC_PORT := 8000
DOCKER_RUN_DOC_MOUNT := -v $(CURDIR):/mkdocs
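Changing `DOCKER_RUN_TRAEFIK` from `:=` to `?=` lets callers override it from the environment; the CI jobs in this change set it to an empty string so make targets run directly on the host. A minimal usage sketch:

```bash
# Default behaviour (variable unset): the build runs inside the Traefik dev image.
make binary

# With the variable emptied, the same target runs ./script/make.sh directly on the host,
# as the GitHub Actions and Semaphore jobs above do.
DOCKER_RUN_TRAEFIK="" make binary
```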
@@ -44,6 +44,9 @@ all: generate-webui build ## validate all checks, build linux binary, run all te
binary: generate-webui build ## build the linux binary
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate binary

binary-with-no-ui: ## build the linux binary without the ui generation
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate binary

crossbinary: generate-webui build ## cross build the non-linux binaries
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate crossbinary
@@ -75,6 +78,12 @@ test-integration: build ## run the integration tests
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate binary test-integration
    TEST_HOST=1 ./script/make.sh test-integration

test-integration-container: build ## Run the container integration tests
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh test-integration

test-integration-host: build ## Run the host integration tests
    TEST_HOST=1 ./script/make.sh test-integration

validate: build ## validate code, vendor and autogen
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt validate-golint validate-misspell validate-vendor validate-autogen
@@ -133,7 +142,7 @@ generate-webui: build-webui
    if [ ! -d "static" ]; then \
        mkdir -p static; \
        docker run --rm -v "$$PWD/static":'/src/static' traefik-webui npm run build; \
        echo 'For more informations show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
        echo 'For more information show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
    fi

lint:
@@ -145,12 +154,5 @@ fmt:
pull-images:
    grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull

dep-ensure:
    dep ensure -v
    ./script/prune-dep.sh

dep-prune:
    ./script/prune-dep.sh

help: ## this help
    @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@@ -14,8 +14,8 @@ import (
	"sync"
	"time"

	"github.com/go-acme/lego/certcrypto"
	"github.com/go-acme/lego/registration"
	"github.com/go-acme/lego/v4/certcrypto"
	"github.com/go-acme/lego/v4/registration"
	"github.com/traefik/traefik/log"
	acmeprovider "github.com/traefik/traefik/provider/acme"
	"github.com/traefik/traefik/types"
acme/acme.go (46 changes)
@@ -21,14 +21,14 @@ import (
	"github.com/containous/mux"
	"github.com/containous/staert"
	"github.com/eapache/channels"
	"github.com/go-acme/lego/certificate"
	"github.com/go-acme/lego/challenge"
	"github.com/go-acme/lego/challenge/dns01"
	"github.com/go-acme/lego/challenge/http01"
	"github.com/go-acme/lego/lego"
	legolog "github.com/go-acme/lego/log"
	"github.com/go-acme/lego/providers/dns"
	"github.com/go-acme/lego/registration"
	"github.com/go-acme/lego/v4/certificate"
	"github.com/go-acme/lego/v4/challenge"
	"github.com/go-acme/lego/v4/challenge/dns01"
	"github.com/go-acme/lego/v4/challenge/http01"
	"github.com/go-acme/lego/v4/lego"
	legolog "github.com/go-acme/lego/v4/log"
	"github.com/go-acme/lego/v4/providers/dns"
	"github.com/go-acme/lego/v4/registration"
	"github.com/sirupsen/logrus"
	"github.com/traefik/traefik/cluster"
	"github.com/traefik/traefik/log"
@@ -46,6 +46,7 @@ var (
// ACME allows to connect to lets encrypt and retrieve certs
// Deprecated Please use provider/acme/Provider
type ACME struct {
	PreferredChain string `description:"Preferred chain to use."`
	Email string `description:"Email address used for registration"`
	Domains []types.Domain `description:"SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='main.net,san1.net,san2.net'"`
	Storage string `description:"File or key used for certificates storage."`
@@ -376,11 +377,13 @@ func (a *ACME) renewACMECertificate(certificateResource *DomainsCertificate) (*C
		CertStableURL: certificateResource.Certificate.CertStableURL,
		PrivateKey: certificateResource.Certificate.PrivateKey,
		Certificate: certificateResource.Certificate.Certificate,
	}, true, OSCPMustStaple)
	}, true, OSCPMustStaple, a.PreferredChain)
	if err != nil {
		return nil, err
	}

	log.Infof("Renewed certificate from LE: %+v", certificateResource.Domains)

	return &Certificate{
		Domain: renewedCert.Domain,
		CertURL: renewedCert.CertURL,
@@ -448,14 +451,18 @@ func (a *ACME) buildACMEClient(account *Account) (*lego.Client, error) {
	err = client.Challenge.SetDNS01Provider(provider,
		dns01.CondOption(len(a.DNSChallenge.Resolvers) > 0, dns01.AddRecursiveNameservers(a.DNSChallenge.Resolvers)),
		dns01.CondOption(a.DNSChallenge.DisablePropagationCheck || a.DNSChallenge.DelayBeforeCheck > 0,
			dns01.AddPreCheck(func(_, _ string) (bool, error) {
				if a.DNSChallenge.DelayBeforeCheck > 0 {
					log.Debugf("Delaying %d rather than validating DNS propagation now.", a.DNSChallenge.DelayBeforeCheck)
					time.Sleep(time.Duration(a.DNSChallenge.DelayBeforeCheck))
				}
			dns01.WrapPreCheck(func(domain, fqdn, value string, check dns01.PreCheckFunc) (bool, error) {
				if a.DNSChallenge.DelayBeforeCheck > 0 {
					log.Debugf("Delaying %d rather than validating DNS propagation now.", a.DNSChallenge.DelayBeforeCheck)
					time.Sleep(time.Duration(a.DNSChallenge.DelayBeforeCheck))
				}

				if a.DNSChallenge.DisablePropagationCheck {
					return true, nil
			})),
				}

				return check(fqdn, value)
			}),
	)
	return client, err
}
@@ -703,9 +710,10 @@ func (a *ACME) getDomainsCertificates(domains []string) (*Certificate, error) {
	bundle := true

	request := certificate.ObtainRequest{
		Domains: cleanDomains,
		Bundle: bundle,
		MustStaple: OSCPMustStaple,
		Domains: cleanDomains,
		Bundle: bundle,
		MustStaple: OSCPMustStaple,
		PreferredChain: a.PreferredChain,
	}

	cert, err := a.client.Certificate.Obtain(request)
@@ -6,7 +6,7 @@ import (
	"time"

	"github.com/cenk/backoff"
	"github.com/go-acme/lego/challenge"
	"github.com/go-acme/lego/v4/challenge"
	"github.com/traefik/traefik/cluster"
	"github.com/traefik/traefik/log"
	"github.com/traefik/traefik/safe"
@@ -8,8 +8,8 @@ import (
	"time"

	"github.com/cenk/backoff"
	"github.com/go-acme/lego/challenge"
	"github.com/go-acme/lego/challenge/tlsalpn01"
	"github.com/go-acme/lego/v4/challenge"
	"github.com/go-acme/lego/v4/challenge/tlsalpn01"
	"github.com/traefik/traefik/cluster"
	"github.com/traefik/traefik/log"
	"github.com/traefik/traefik/safe"
@@ -1,4 +1,4 @@
FROM golang:1.14-alpine
FROM golang:1.16-alpine

RUN apk --update upgrade \
    && apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar ca-certificates tzdata \
@@ -10,22 +10,25 @@ RUN go get golang.org/x/lint/golint \
# Which docker version to test on
ARG DOCKER_VERSION=18.09.7
ARG DEP_VERSION=0.5.1

# Download go-bindata binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \
    && curl -fsSL -o /usr/local/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata \
    && chmod +x /usr/local/bin/go-bindata

# Download dep binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \
    && curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 \
    && chmod +x /usr/local/bin/dep

# Download docker
RUN mkdir -p /usr/local/bin \
    && curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz \
    | tar -xzC /usr/local/bin --transform 's#^.+/##x'

# Download go-bindata binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \
    && curl -fsSL -o /usr/local/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata \
    && chmod +x /usr/local/bin/go-bindata

# Download misspell binary to bin folder in $GOPATH
RUN curl -sfL https://raw.githubusercontent.com/client9/misspell/master/install-misspell.sh | bash -s -- -b $GOPATH/bin v0.3.4

WORKDIR /go/src/github.com/traefik/traefik

# Download go modules
COPY go.mod .
COPY go.sum .
RUN GO111MODULE=on GOPROXY=https://proxy.golang.org go mod download

COPY . /go/src/github.com/traefik/traefik
@@ -1,14 +1,14 @@
package configuration

import (
	"errors"
	"fmt"
	"strings"
	"time"

	"github.com/containous/flaeg"
	servicefabric "github.com/containous/traefik-extra-service-fabric"
	"github.com/go-acme/lego/challenge/dns01"
	"github.com/pkg/errors"
	"github.com/go-acme/lego/v4/challenge/dns01"
	"github.com/traefik/traefik/acme"
	"github.com/traefik/traefik/api"
	"github.com/traefik/traefik/log"
@@ -452,18 +452,19 @@ func (gc *GlobalConfiguration) InitACMEProvider() (*acmeprovider.Provider, error
	if gc.Cluster == nil {
		provider := &acmeprovider.Provider{}
		provider.Configuration = &acmeprovider.Configuration{
			KeyType: gc.ACME.KeyType,
			OnHostRule: gc.ACME.OnHostRule,
			OnDemand: gc.ACME.OnDemand,
			Email: gc.ACME.Email,
			Storage: gc.ACME.Storage,
			HTTPChallenge: gc.ACME.HTTPChallenge,
			DNSChallenge: gc.ACME.DNSChallenge,
			TLSChallenge: gc.ACME.TLSChallenge,
			Domains: gc.ACME.Domains,
			ACMELogging: gc.ACME.ACMELogging,
			CAServer: gc.ACME.CAServer,
			EntryPoint: gc.ACME.EntryPoint,
			KeyType: gc.ACME.KeyType,
			OnHostRule: gc.ACME.OnHostRule,
			OnDemand: gc.ACME.OnDemand,
			Email: gc.ACME.Email,
			PreferredChain: gc.ACME.PreferredChain,
			Storage: gc.ACME.Storage,
			HTTPChallenge: gc.ACME.HTTPChallenge,
			DNSChallenge: gc.ACME.DNSChallenge,
			TLSChallenge: gc.ACME.TLSChallenge,
			Domains: gc.ACME.Domains,
			ACMELogging: gc.ACME.ACMELogging,
			CAServer: gc.ACME.CAServer,
			EntryPoint: gc.ACME.EntryPoint,
		}

		store := acmeprovider.NewLocalStore(provider.Storage)
@@ -1,4 +1,4 @@
FROM alpine:3.7
FROM alpine:3.14

ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/.local/bin
@@ -6,5 +6,5 @@ COPY requirements.txt /mkdocs/
WORKDIR /mkdocs
VOLUME /mkdocs

RUN apk --no-cache --no-progress add py-pip \
    && pip install --user -r requirements.txt
RUN apk --no-cache --no-progress add py3-pip gcc musl-dev python3-dev \
    && pip3 install --user -r requirements.txt
@@ -93,6 +93,13 @@ entryPoint = "https"
#
# caServer = "https://acme-staging-v02.api.letsencrypt.org/directory"

# Preferred chain to use.
#
# Optional
# Default: empty
#
preferredChain = "ISRG Root X1"

# KeyType to use.
#
# Optional
|
# ...
```

### `preferredChain`

Preferred chain to use.

```toml
[acme]
# ...
preferredChain = "ISRG Root X1"
# ...
```

### ACME Challenge

#### `tlsChallenge`
@@ -196,7 +196,7 @@ by watching the Docker API through this socket.
!!! important
    Depending on your context and your usage, accessing the Docker API without any restriction might be a security concern.

    As explained on the Docker documentation: ([Docker Daemon Attack Surface page](https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface)):
    As explained on the Docker documentation: ([Docker Daemon Attack Surface page](https://docs.docker.com/engine/security/#docker-daemon-attack-surface)):

    `[...] only **trusted** users should be allowed to control your Docker daemon [...]`
@@ -209,7 +209,7 @@ to let Traefik accessing the Docker Socket of the Swarm manager node.
More information about Docker's security:

- [KubeCon EU 2018 Keynote, Running with Scissors, from Liz Rice](https://www.youtube.com/watch?v=ltrV-Qmh3oY)
- [Don't expose the Docker socket (not even to a container)](https://www.lvh.io/posts/dont-expose-the-docker-socket-not-even-to-a-container.html)
- [Don't expose the Docker socket (not even to a container)](https://www.lvh.io/posts/dont-expose-the-docker-socket-not-even-to-a-container/)
- [A thread on Stack Overflow about sharing the `/var/run/docker.sock` file](https://news.ycombinator.com/item?id=17983623)
- [To Dind or not to DinD](https://blog.loof.fr/2018/01/to-dind-or-not-do-dind.html)
@@ -246,7 +246,7 @@ Use the following ressources to get started:
### Using Docker with Swarm Mode

If you use a compose file with the Swarm mode, labels should be defined in the `deploy` part of your service.
This behavior is only enabled for docker-compose version 3+ ([Compose file reference](https://docs.docker.com/compose/compose-file/#labels-1)).
This behavior is only enabled for docker-compose version 3+ ([Compose file reference](https://docs.docker.com/compose/compose-file/compose-file-v3/#labels-2)).

```yaml
version: "3"
@@ -373,7 +373,7 @@ Something more tricky using `regex`.
In this case a slash is added to `siteexample.io/portainer` and redirect to `siteexample.io/portainer/`. For more details: https://github.com/traefik/traefik/issues/563

The double sign `$$` are variables managed by the docker compose file ([documentation](https://docs.docker.com/compose/compose-file/#variable-substitution)).
The double sign `$$` are variables managed by the docker compose file ([documentation](https://docs.docker.com/compose/compose-file/compose-file-v3/#variable-substitution)).

```
portainer:
@@ -7,12 +7,12 @@ RUN mkdir -p $WEBUI_DIR
COPY ./webui/ $WEBUI_DIR/

WORKDIR $WEBUI_DIR
RUN yarn install

RUN yarn install
RUN npm run build

# BUILD
FROM golang:1.14-alpine as gobuild
FROM golang:1.16-alpine as gobuild

RUN apk --update upgrade \
    && apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar ca-certificates tzdata \
@@ -24,6 +24,12 @@ RUN mkdir -p /usr/local/bin \
    && chmod +x /usr/local/bin/go-bindata

WORKDIR /go/src/github.com/traefik/traefik

# Download go modules
COPY go.mod .
COPY go.sum .
RUN GO111MODULE=on GOPROXY=https://proxy.golang.org go mod download

COPY . /go/src/github.com/traefik/traefik

RUN rm -rf /go/src/github.com/traefik/traefik/static/
go.mod (new file, 118 lines)
@@ -0,0 +1,118 @@
module github.com/traefik/traefik

go 1.16

require (
	github.com/ArthurHlt/go-eureka-client v0.0.0-20170403140305-9d0a49cbd39a
	github.com/ArthurHlt/gominlog v0.0.0-20170402142412-72eebf980f46 // indirect
	github.com/Azure/azure-sdk-for-go v40.3.0+incompatible // indirect
	github.com/BurntSushi/toml v0.3.1
	github.com/BurntSushi/ty v0.0.0-20140213233908-6add9cd6ad42
	github.com/Masterminds/sprig v2.19.0+incompatible
	github.com/Microsoft/go-winio v0.4.3 // indirect
	github.com/NYTimes/gziphandler v1.0.1
	github.com/Shopify/sarama v1.30.0 // indirect
	github.com/VividCortex/gohistogram v1.0.0 // indirect
	github.com/abbot/go-http-auth v0.0.0-00010101000000-000000000000
	github.com/abronan/valkeyrie v0.2.0
	github.com/apache/thrift v0.12.0 // indirect
	github.com/armon/go-metrics v0.3.8 // indirect
	github.com/armon/go-proxyproto v0.0.0-20170620220930-48572f11356f
	github.com/aws/aws-sdk-go v1.39.0
	github.com/cenk/backoff v2.1.1+incompatible
	github.com/codahale/hdrhistogram v0.9.0 // indirect
	github.com/containous/flaeg v1.4.1
	github.com/containous/mux v0.0.0-20181024131434-c33f32e26898
	github.com/containous/staert v3.1.2+incompatible
	github.com/containous/traefik-extra-service-fabric v1.7.1-0.20210227093100-8dcd57b609a8
	github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
	github.com/davecgh/go-spew v1.1.1
	github.com/docker/docker v1.4.2-0.20171023200535-7848b8beb9d3
	github.com/docker/go-connections v0.3.0
	github.com/docker/leadership v0.0.0-00010101000000-000000000000
	github.com/docker/libcompose v0.4.1-0.20190808084053-143e0f3f1ab9 // indirect
	github.com/donovanhide/eventsource v0.0.0-20170630084216-b8f31a59085e // indirect
	github.com/eapache/channels v1.1.0
	github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 // indirect
	github.com/elazarl/go-bindata-assetfs v1.0.0
	github.com/gambol99/go-marathon v0.7.2-0.20180614232016-99a156b96fb2
	github.com/go-acme/lego/v4 v4.5.3
	github.com/go-check/check v0.0.0-00010101000000-000000000000
	github.com/go-kit/kit v0.9.0
	github.com/golang/protobuf v1.5.2
	github.com/google/go-github v9.0.0+incompatible
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.1.2
	github.com/gorilla/websocket v1.4.2
	github.com/gravitational/trace v1.1.3 // indirect
	github.com/hashicorp/consul/api v1.9.1
	github.com/hashicorp/go-hclog v0.14.1 // indirect
	github.com/hashicorp/go-immutable-radix v1.3.0 // indirect
	github.com/hashicorp/go-msgpack v1.1.5 // indirect
	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
	github.com/hashicorp/go-version v1.2.1
	github.com/hashicorp/golang-lru v0.5.4 // indirect
	github.com/hashicorp/memberlist v0.2.4 // indirect
	github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab
	github.com/jjcollinge/servicefabric v0.0.2-0.20180125130438-8eebe170fa1b
	github.com/libkermit/compose v0.0.0-20171122111507-c04e39c026ad
	github.com/libkermit/docker v0.0.0-20171122101128-e6674d32b807
	github.com/libkermit/docker-check v0.0.0-20171122104347-1113af38e591
	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
	github.com/mesos/mesos-go v0.0.3-0.20150930144802-068d5470506e
	github.com/mesosphere/mesos-dns v0.0.0-00010101000000-000000000000
	github.com/miekg/dns v1.1.43
	github.com/mitchellh/copystructure v1.0.0
	github.com/mitchellh/go-testing-interface v1.14.1 // indirect
	github.com/mitchellh/hashstructure v1.0.0
	github.com/mitchellh/mapstructure v1.4.1
	github.com/mitchellh/reflectwalk v1.0.1 // indirect
	github.com/mvdan/xurls v1.1.1-0.20170309204242-db96455566f0
	github.com/ogier/pflag v0.0.2-0.20160129220114-45c278ab3607
	github.com/opencontainers/image-spec v1.0.0-rc5.0.20170515205857-f03dbe35d449 // indirect
	github.com/opencontainers/runc v1.0.0-rc3.0.20170425215914-b6b70e534517 // indirect
	github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
	github.com/opentracing/opentracing-go v1.0.2
	github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/philhofer/fwd v1.0.0 // indirect
	github.com/prometheus/client_golang v1.11.0
	github.com/prometheus/client_model v0.2.0
	github.com/rancher/go-rancher v0.1.1-0.20171004213057-52e2f4895340
	github.com/rancher/go-rancher-metadata v0.0.0-00010101000000-000000000000
	github.com/ryanuber/go-glob v1.0.0
	github.com/shopspring/decimal v1.1.1-0.20191009025716-f1972eb1d1f5
	github.com/sirupsen/logrus v1.8.1
	github.com/stretchr/testify v1.7.0
	github.com/stvp/go-udp-testing v0.0.0-20171104055251-c4434f09ec13
	github.com/thoas/stats v0.0.0-20190104110215-4975baf6a358
	github.com/tinylib/msgp v1.0.2 // indirect
	github.com/tv42/zbase32 v0.0.0-20150911225513-03389da7e0bf // indirect
	github.com/uber/jaeger-client-go v2.15.0+incompatible
	github.com/uber/jaeger-lib v1.5.0
	github.com/unrolled/render v0.0.0-20170109143244-50716a0a8537
	github.com/unrolled/secure v1.0.5
	github.com/urfave/negroni v0.2.1-0.20170426175938-490e6a555d47
	github.com/vdemeester/shakers v0.1.0
	github.com/vulcand/oxy v1.2.0
	go.etcd.io/bbolt v1.3.5 // indirect
	golang.org/x/net v0.0.0-20210917221730-978cfadd31cf
	google.golang.org/grpc v1.38.0
	gopkg.in/DataDog/dd-trace-go.v1 v1.13.0
	gopkg.in/fsnotify.v1 v1.4.7
	gopkg.in/yaml.v2 v2.4.0
	k8s.io/api v0.21.0
	k8s.io/apimachinery v0.21.0
	k8s.io/client-go v0.21.0
	k8s.io/utils v0.0.0-20210709001253-0e1f9d693477 // indirect
)

replace (
	github.com/abbot/go-http-auth => github.com/containous/go-http-auth v0.4.1-0.20180112153951-65b0cdae8d7f
	github.com/docker/docker => github.com/docker/engine v0.0.0-20190725163905-fa8dd90ceb7b
	github.com/docker/leadership => github.com/containous/leadership v0.1.1-0.20180123135645-a2e096d9fe0a
	github.com/go-check/check => github.com/containous/check v0.0.0-20170915194414-ca0bf163426a
	github.com/mesosphere/mesos-dns => github.com/containous/mesos-dns v0.5.3-rc1.0.20160623212649-b47dc4c19f21
	github.com/rancher/go-rancher-metadata => github.com/containous/go-rancher-metadata v0.0.0-20180116133453-e937e8308985
	gopkg.in/fsnotify.v1 => github.com/fsnotify/fsnotify v1.4.2
)
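These commits replace the dep manifests (Gopkg.toml and Gopkg.lock) with this go.mod. The exact migration commands are not part of the diff; a rough sketch of the equivalent local steps, assuming a Go 1.16 toolchain:

```bash
export GO111MODULE=on GOPROXY=https://proxy.golang.org,direct
go mod init github.com/traefik/traefik   # hypothetical first step; module path as declared above
go mod tidy                              # resolve the require/replace sets shown in go.mod
go mod download                          # warm the module cache, as the build Dockerfiles do
```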
integration/fixtures/https/generate_certs.sh (new file, 47 lines)
@@ -0,0 +1,47 @@
|
||||
openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out snitest.com.cert \
-keyout snitest.com.key \
-subj "/CN=snitest.com" \
-addext "subjectAltName = DNS:snitest.com"

openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out www.snitest.com.cert \
-keyout www.snitest.com.key \
-subj "/CN=www.snitest.com" \
-addext "subjectAltName = DNS:www.snitest.com"

openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out snitest.org.cert \
-keyout snitest.org.key \
-subj "/CN=snitest.org" \
-addext "subjectAltName = DNS:snitest.org"

openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out uppercase_wildcard.www.snitest.com.cert \
-keyout uppercase_wildcard.www.snitest.com.key \
-subj "/CN=FOO.WWW.SNITEST.COM" \
-addext "subjectAltName = DNS:*.WWW.SNITEST.COM"

openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out wildcard.www.snitest.com.cert \
-keyout wildcard.www.snitest.com.key \
-subj "/CN=*.www.snitest.com" \
-addext "subjectAltName = DNS:*.www.snitest.com"

openssl req -newkey rsa:2048 \
-new -nodes -x509 \
-days 3650 \
-out wildcard.snitest.com.cert \
-keyout wildcard.snitest.com.key \
-subj "/CN=*.snitest.com" \
-addext "subjectAltName = DNS:*.snitest.com"
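Note: the regenerated fixtures all carry a subjectAltName extension because Go's TLS verification (since Go 1.15) no longer falls back to the Common Name. As a quick sanity check of what the script writes, a minimal sketch (not part of this change, assumed to run next to the generated files) could load one pair and print its SANs:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"log"
)

func main() {
	// Load one of the pairs produced by generate_certs.sh.
	pair, err := tls.LoadX509KeyPair("snitest.com.cert", "snitest.com.key")
	if err != nil {
		log.Fatal(err)
	}
	leaf, err := x509.ParseCertificate(pair.Certificate[0])
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CN:", leaf.Subject.CommonName, "SANs:", leaf.DNSNames)
}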
@@ -1,19 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC/zCCAeegAwIBAgIJAL858pci5XyjMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
|
||||
BAMMC3NuaXRlc3QuY29tMB4XDTE1MTEyMzIyMDU1NloXDTI1MTEyMDIyMDU1Nlow
|
||||
FjEUMBIGA1UEAwwLc25pdGVzdC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
|
||||
ggEKAoIBAQDVF25wEroSIUO/dNgHxlyt8pZFVpJ8fNaJw7cnlZ1JP2hLuEbmjAFT
|
||||
dHqS8wKDNYHktsBEOUfN/qbk0AiGb+SvhQw6kfM/QSj9fXVQ7KhYP9XYOekTOH7d
|
||||
M0Z2L3RGgqs8z+83exOOnAFVvIJCMZJXEeijV6iJlmpCcJa0Kg/JHlxhoWTEeZuU
|
||||
G+hITafk1yWOKorTCPlMhB30wuQoWfbHP+3G0bsERLXFiMANE8EtQu8+ZhfseBUh
|
||||
5Tu5gIC4Fnria7mRixAZeEiAblFP9h0vrNRcP3nmuVz0tHPIeQsJQiEhxaZ09oUW
|
||||
h9WqTsYCP1821+SVazM9oFRTpy6chZyTAgMBAAGjUDBOMB0GA1UdDgQWBBSz9mbX
|
||||
ia1TM5FG4Zgagaet24S8HDAfBgNVHSMEGDAWgBSz9mbXia1TM5FG4Zgagaet24S8
|
||||
HDAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQB79W8XTxlozh9w/W7T
|
||||
5vDev67G4T/wetABSb68CRrqojt78PMJuS89JarA8I3ts00O+0JYnsHxp+9qC7pf
|
||||
jWHcDSiLwRUMu7MXW/KIen1EB8BQNA0xWbAiQaWYPHzsBlX48+9wBe0HTDx7Lcxb
|
||||
OsmnXHBF5fd2EY+R8qJu+PyTDDL1WLItFJpzHiFiGiYF8Tyic3kkPjje6eIOxRmT
|
||||
hq+qbwApzbzz6h/VD5xR3zBDFBo2Xs5tdP264KIw/YXDpaXVBiJ5DDjQ3dtJw1G5
|
||||
yzgrHQZWJN8Gs8ZZgGdgRf7PHox8xEZtqPiMkChDz6T7Ha3U0xYN6TZGNZOR6DHs
|
||||
K9/8
|
||||
MIIDJTCCAg2gAwIBAgIUFbXUF7w4KLaRfECfv1YNNblvk1AwDQYJKoZIhvcNAQEL
|
||||
BQAwFjEUMBIGA1UEAwwLc25pdGVzdC5jb20wHhcNMjEwMjI0MTQ0NTE1WhcNMzEw
|
||||
MjIyMTQ0NTE1WjAWMRQwEgYDVQQDDAtzbml0ZXN0LmNvbTCCASIwDQYJKoZIhvcN
|
||||
AQEBBQADggEPADCCAQoCggEBAL9VhPl0K8Ji0jtVs4PONYKm8Kl/m0P2X8nnf+do
|
||||
JsB+5O2xzbUfm+0egNnLePkyQTk3kZocdPPJEa3e1pZFB5k6mab+sSFJ44HitQb7
|
||||
VhbBn2GE6JqMnIGF9mBMFcEcTl83Qmtf0Zn/q5FrATV3V19i/V8KtOj0vMyCf5RS
|
||||
FQL8FBs8J4AynOaWL/HUbXJajD2imNyORuiVGX3t8y6Y3WMGp6KKOKzT5789UpOl
|
||||
ugM45xlAVnFfJ061j1kf6q9zF2ZMgjU9u8JTfe+r4v8Ih/hB5fZv2arHCe9x2+Uv
|
||||
9WFo2hH4w59XgpyZLiqaPSUsR/kQ7m63VtOKAKcW1wUnv9MCAwEAAaNrMGkwHQYD
|
||||
VR0OBBYEFFf6hm4MeuP2OC/lFnMfeZ29HA3wMB8GA1UdIwQYMBaAFFf6hm4MeuP2
|
||||
OC/lFnMfeZ29HA3wMA8GA1UdEwEB/wQFMAMBAf8wFgYDVR0RBA8wDYILc25pdGVz
|
||||
dC5jb20wDQYJKoZIhvcNAQELBQADggEBAEA8ZCTlGRNRdfcA9utHWXeemXL0Nd9a
|
||||
PTSpPc0EbrLQqN30KBMVqhj7rbZvDjhtgi5UWvvlYNSwh+g7eltKcLi/DNPlGwir
|
||||
1FcDmXz2D/a3o5K1mOZBT1vXA2dDYyqQLRr3iVk4TEIskwH7d/BpDNb74sDTtsky
|
||||
HgsPh4fjeoytMH856v3mM72oImG/P+E/7dMWxITMIA29M5EMswb0qYLa2UElSGVu
|
||||
F36Xb+BWI6F/sd+sVyctcdEvoKY8h+rhn+G7UId6D5Dnvx+p2x0kJF5WvK8IRZUM
|
||||
hCv+6Ndqb8QVLrUOJ1KdCQBpq2JEjV6CCze8/Taxwf0OJwWJywOuMNo=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,27 +1,28 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpQIBAAKCAQEA1RducBK6EiFDv3TYB8ZcrfKWRVaSfHzWicO3J5WdST9oS7hG
|
||||
5owBU3R6kvMCgzWB5LbARDlHzf6m5NAIhm/kr4UMOpHzP0Eo/X11UOyoWD/V2Dnp
|
||||
Ezh+3TNGdi90RoKrPM/vN3sTjpwBVbyCQjGSVxHoo1eoiZZqQnCWtCoPyR5cYaFk
|
||||
xHmblBvoSE2n5NcljiqK0wj5TIQd9MLkKFn2xz/txtG7BES1xYjADRPBLULvPmYX
|
||||
7HgVIeU7uYCAuBZ64mu5kYsQGXhIgG5RT/YdL6zUXD955rlc9LRzyHkLCUIhIcWm
|
||||
dPaFFofVqk7GAj9fNtfklWszPaBUU6cunIWckwIDAQABAoIBABAdQYDAKcoNMe5c
|
||||
i6mq2n9dBPghX9qCJkcswcEAk3BilySCvvnYRJFnEY3jSqFZfoUpPMjr+/4b78sF
|
||||
4F8qPwT27sHPH7H833ir8B86hlCGI0nCt1l4wD9CDWYKmKRsZT6oCtMLP6NdMMyn
|
||||
AMK4tPRYqlsP2fLtqQN1ODBPrfnraoNHtOVE784iBCD5dewICA5RIQG2i/d2+CGF
|
||||
+bahFqUXVCqHoxBz4AVvrRFL99VcP7P2iZyk6hDQ7fci7Xay8Wb/HutRxuqvF0aU
|
||||
bG6Enk6CCtNZHLwNPp4Hqft0Udvg2tG8okYwbEmoEO40nQsCSzRCpq5Uvzi+LX1k
|
||||
LykQ6+ECgYEA7x8vQoyOK60Q3LPpJFGDec2+XJPoesTfJTT6idaP7ukUL8p3FsUo
|
||||
9vtxRRfhSOdPoAINmrL0TyMekO2B6zXx0pmWVpMrFwZW6zMwZAnLp/w+3USpbGCy
|
||||
K12IIwvRYzTzKwoMTVAKTXm36b6oqr2La4bTdJR7REY6G374FrJb/H0CgYEA5CHk
|
||||
Ym0h7cf00fw9UEHRfzUZxmCfRWY6K8InOuHdLi+u4TiyXzs8x5s0e/DN/raNmTGx
|
||||
QO81UzuS3nKwc4n5QyXjVnhzR5DbbSACDwHtdnxZByL0D1KvPjtRF8F+rWXViXv2
|
||||
TM7UiOmn6R375FPSAPxeyMx8Womc3EnAAfLWGk8CgYEAv8I2WBv3dzcWqqbsdF+a
|
||||
G/fOjNdgO/PdLy1JLXiPfHwV4C1xSyVZMJd7wnjgBWLaC+sZldGk8kGrpXWSFlnw
|
||||
T38zfMIQcCp5Uax/RfpFA7XZhAAoDe2NdBFRtyknBXPU/dLVArsJSBAwWJa5FBNk
|
||||
1xoMQRVBtQLMXnh341utQNECgYEA4o1R2/ka16NaWmpPjXM/lD9skFgF84p4vFn8
|
||||
UXpaB3LtDdcbNH2Ed4mHToouWAR8jCUQLTcg0r53tRdaafMcKfXnVUka2nhdoHpH
|
||||
8RVt99u3IeIxU0I+q+OGPbw3jAV0UStcxpwj7q9zw4q2SuJ+y+HUUz7XQ6Yjs5Q9
|
||||
7PF2c/sCgYEAhdVn5gZ5FvYKrBi46t3pxPsWK476HmQEVHVi5+od7wg+araDelAe
|
||||
8QE8hc8qdZGbjdB/AHSPCeUxfO2vnpsCoSRs29o6pDvQuqvHYs+M53l5LEYeOjof
|
||||
t6J/DK5Pim2CAFjYFcZk8/Gyl5HjTw3PpdWxoPD5v2Xw3bbY57IIbm4=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/VYT5dCvCYtI7
|
||||
VbODzjWCpvCpf5tD9l/J53/naCbAfuTtsc21H5vtHoDZy3j5MkE5N5GaHHTzyRGt
|
||||
3taWRQeZOpmm/rEhSeOB4rUG+1YWwZ9hhOiajJyBhfZgTBXBHE5fN0JrX9GZ/6uR
|
||||
awE1d1dfYv1fCrTo9LzMgn+UUhUC/BQbPCeAMpzmli/x1G1yWow9opjcjkbolRl9
|
||||
7fMumN1jBqeiijis0+e/PVKTpboDOOcZQFZxXydOtY9ZH+qvcxdmTII1PbvCU33v
|
||||
q+L/CIf4QeX2b9mqxwnvcdvlL/VhaNoR+MOfV4KcmS4qmj0lLEf5EO5ut1bTigCn
|
||||
FtcFJ7/TAgMBAAECggEAIly/hvWALmcFDrbziIZuQcqtiiNqxSfoSCCVbf/chdHE
|
||||
V5QJN7A3lrxVs8fBMDAAWsBKG/1kW6wOysUskMxQ8wn9MmqncS0N+PO4Q0q8SVX4
|
||||
w9DU3XzHVtOyTdUcSxREVwFbomERCbFLJNCs5OdL/8uNaZZIq1XY2nnNY9vh/ijs
|
||||
CSZF1ZX2GMIQajf0lFTtXs0Vtzf/sMYC9pEoBGuI5BkdoHBBE2ZlsY4XTnwiDgjL
|
||||
Z0Y6jLHtDDjWjqR4Ck3JOmyEPQtG1YnDIcr/WcFLWwvqXsSyonSkZfYgvPOZMeHK
|
||||
SajFv+esbGKqiCu+zwml9/PCZZu5lzi7ISomfPNMUQKBgQDhdZUv1/mK+5jRPklc
|
||||
1FcHbXCrnAMa5+CJ1Cn6gwGr128pErnKxdc1xxQSjVSbU1KvE0cJevjiVAV9B+/P
|
||||
Xnjva8qoOCUFVrdZsEazEEH4CXYPrC1vKsz/bfaKDVDLmKgdwIr53kyx67aenkeO
|
||||
oWzminLYhFgfgUkArIdhBmB7zwKBgQDZQIzuyT6DswtwRPUDvCBz/AiXEjjOSToc
|
||||
+8LfA6UsEnyHtOEOmbLDoa/de9LescBXmmfAheWk8B6h94Lfh6k2SR44nJRjDkne
|
||||
Inb0O9gn31Nwyejy2UFc+C7Uq0bHLhZiXwV8qBXmkgT+PQ8l0bvg8MbWgXI+QsLB
|
||||
CTEz+ywovQKBgF7trTUJ0K4uJTfk6+rEFSixccEv3TbU7vvZcxthUbDS8qDum3+/
|
||||
MVAZVGBDVZ5YxPWmPkDFoElbazwrvVMoye61Cg0uJphK3MRMrlfyvnP7N9R9gFvQ
|
||||
P6RE3otd8+jli9OLIHj4mn5Hawr6RdLOMRYYsSZtAtWxD9XSzuWRU8qNAoGASiHf
|
||||
tV+QaGpBHfc50CNyw4EeAUbjgWi1Xsx965GX3BkWy0vqfAvwVxcmJqKpCvqGkp4K
|
||||
h0RpX0XcfhIyY0oscGy01kZ/z+lBRX5nLuH42Wo4U8ViKvP9fNbnTASB61eLpdec
|
||||
zLOifYIYGBUHxTTzMkeW4f1b2Wj4gdw8IOu1XtUCgYEAkCsTPSR2R+jwIobwbM6h
|
||||
VDzCfOpelPx/OUytRHZ8hxZLyTF5J0dlzqy9JhIz/ZP/Josv5d5bjeRjKK9fgNJf
|
||||
xPcWiNo2Tjewj58vvRiYIZJT7P9chA+gSJVisR3Ue+DkSeojgVvSNCLEN/xTbiLK
|
||||
p0tRRYFUyJiHVzcIKQBGF3g=
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC/zCCAeegAwIBAgIJALAYHG/vGqWEMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
|
||||
BAMMC3NuaXRlc3Qub3JnMB4XDTE1MTEyMzIyMDU0NFoXDTI1MTEyMDIyMDU0NFow
|
||||
FjEUMBIGA1UEAwwLc25pdGVzdC5vcmcwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
|
||||
ggEKAoIBAQC8b2Qv68Xnv4wgJ6HNupxABSUA5KXmv9g7pwwsFMSOK15o2qGFzx/x
|
||||
9loIi5pMIYIy4SVwJNrYUi772nCYMqSIVXlwct/CE70j2Jb2geIHu3jHbFWXruWb
|
||||
W1tGGUYzvnsOUziPE3rLWa/NObNYLLlUKJaxfHrxnpuKpQUsXzoLl25cJEVr4jg2
|
||||
ZITpdraxaBLisdlWY7EwwHBLu2nxH5Rn+nIjenFfdUwKF9s5dGy63tfBc8LX9yJk
|
||||
+kOwy1al/Wxa0DUb6rSt0QDCcD+rXnjk2zWPtsHz1btwtqM+FLtN5z0Lmnx7DF3C
|
||||
tCf1TMzduzZ6aeHk77zc664ZQun5cH33AgMBAAGjUDBOMB0GA1UdDgQWBBRn/nNz
|
||||
PUsmDKmKv3GGo3km5KKvUDAfBgNVHSMEGDAWgBRn/nNzPUsmDKmKv3GGo3km5KKv
|
||||
UDAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQBkuutIcbBdESgvNLLr
|
||||
k/8HUDuFm72lYHZFE+c76CxqYN52w02NCTiq1InoDUvqZXb/StATBwRRduTUPCj9
|
||||
KUkC7pOjAFxjzjExsHrtZSq01WinrxNI+qSKvI8jFngMHnwN1omTt7/D7nxeW5Of
|
||||
FJTkElnxtELAGHoIwZ+bKprnexefpn9UW84VJvJ2crSR63vBvdTrgsrEGW6kQj1I
|
||||
62laDpax4+x8t2h+sfG6uNIA1cFrG8Sk+O2Bi3ogB7Y/4e8r6WA23IRP+aSv0J2b
|
||||
k5fvuuXbIc979pQOoO03zG0S7Wpmpsw+9dQB9TOxGITOLfCZwEuIhnv+M9lLqCks
|
||||
7H2A
|
||||
MIIDJTCCAg2gAwIBAgIUVpx6GHgOUQjmpGadSM4KhbPHt6cwDQYJKoZIhvcNAQEL
|
||||
BQAwFjEUMBIGA1UEAwwLc25pdGVzdC5vcmcwHhcNMjEwMjI0MTQ0NTE1WhcNMzEw
|
||||
MjIyMTQ0NTE1WjAWMRQwEgYDVQQDDAtzbml0ZXN0Lm9yZzCCASIwDQYJKoZIhvcN
|
||||
AQEBBQADggEPADCCAQoCggEBAK396aaCL0/RHqvzfiqIBYkaB7AMfVunk6mahSAx
|
||||
+dMpRGoZhHqQVfDFNEpwlzfDrWSISVWj1IWjBRFOVBQnNN9r5cl7TH4DUX1GJGSA
|
||||
+l3tZ84KhGEXTqbnOrWDU4tUBXk36xgi1mzvXePOGGt3r2yJMJgbrRzOHqFPCEi8
|
||||
H3lYsnaAlVIv0VkfANiP70H/Zax0Oj36DvzjimivnalAEI8Gz1y3EmelhNnBEkeq
|
||||
GvpUuAU0s/NC1/K4Zb0f55x5Oy2dA0Fvv4E7tzL6JY7qLH4tAUexghH5AVV6vuMv
|
||||
aiVH4UcZXiyYb+zDW30cMyjlIpkf/Zo+wUTwaCD8RnOjcXcCAwEAAaNrMGkwHQYD
|
||||
VR0OBBYEFC6bo2fNwaXJlgPT+g+BU8b08083MB8GA1UdIwQYMBaAFC6bo2fNwaXJ
|
||||
lgPT+g+BU8b08083MA8GA1UdEwEB/wQFMAMBAf8wFgYDVR0RBA8wDYILc25pdGVz
|
||||
dC5vcmcwDQYJKoZIhvcNAQELBQADggEBAF5GAS/6U5bGJQAeAdcm7YO23SdidulC
|
||||
miSeshB0iLtU7ztbPhT7MsMe+ifuGWg1hbcyApkatwnTxIpt8px/GUtGzxeOGATl
|
||||
uWSn5n05G+rbMm9yhallTjwQaMXR4w5wV1qdK6nDX9ncCAYlMQGkAiBw7tpnElzT
|
||||
65WvAXPmv/ckP3+GFJUcM5ggBvPFDaOsLtYBnZP7OM6PlwFVKcfL/O4yN6kPv4hp
|
||||
4GRGOZ4zv4tuaH8ny5ZwKrwqzVAXO3CSZjWEte6bGs7sjp3QC7WdgmVKFVDC+IAa
|
||||
XRYAC76wqwEhp43oAR3DOxnsHsuFxi6cXdVCf1GVPVjOteMW5TtkUCk=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,27 +1,28 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEogIBAAKCAQEAvG9kL+vF57+MICehzbqcQAUlAOSl5r/YO6cMLBTEjiteaNqh
|
||||
hc8f8fZaCIuaTCGCMuElcCTa2FIu+9pwmDKkiFV5cHLfwhO9I9iW9oHiB7t4x2xV
|
||||
l67lm1tbRhlGM757DlM4jxN6y1mvzTmzWCy5VCiWsXx68Z6biqUFLF86C5duXCRF
|
||||
a+I4NmSE6Xa2sWgS4rHZVmOxMMBwS7tp8R+UZ/pyI3pxX3VMChfbOXRsut7XwXPC
|
||||
1/ciZPpDsMtWpf1sWtA1G+q0rdEAwnA/q1545Ns1j7bB89W7cLajPhS7Tec9C5p8
|
||||
ewxdwrQn9UzM3bs2emnh5O+83OuuGULp+XB99wIDAQABAoIBAGOn9bByXQQnhZAr
|
||||
5aLMIn6pOdyzEBptM4q42fMmOJ2HyjJiDjKaTCbHRu5mBoBk6FrIP+iDVUo6jKad
|
||||
7BZSEjoYGlWiKzyU+97NWWmdX1D/kOzHGq1RzhTPyAHWtA4Bm0sEMFFa2AJbuGIt
|
||||
NfBYFtuva6MKVmsamuBETewdoLEnxzzDFcuOaxXRfTC/ikWcYyB4KEWA5fjroUQC
|
||||
Llo9/UTGTkh1Hynv9AXY6Qia/RbrIQjKveKCRj6PjxyE/qN9qfmngczz2pK0hRhL
|
||||
Z+K06y8G+Yj1I1zm5jNg1kakVQKoBsnaYkmIUBUSmWv6ERotedOWtOAMlOKa+0l2
|
||||
DS7Ou2ECgYEA91doi+3XrMVsgyTEm/ArzEyRUfM5dCSvBCRFhO7QQp2OYAbjJk5S
|
||||
pmdpqmwTsXNNMU+XNkWCLug5pk0PTJwP0mVLE2fLYqCCXoyaMltQ0Yk2gaun/RwE
|
||||
w5EfyMwOQakLFY/ODvduQfyNpaoWgFz4/WPNTVNCGs04LepSGKaFNy0CgYEAwwgV
|
||||
jKeFA+QZGooTInyk07ZlAbenEPu/c2y3UUFxclP0CjP2/VBOpz9B62vhzCKbjD1c
|
||||
/L3x1CKC4n4lbeyHi4vrF69LX9SHr1Jm0SUtyKeV3g0EAzIWI0HFhVUkMvtbibQ4
|
||||
HXrLVCJO77xetQ7RQszss1z9g3WotAAiBMiQgDMCgYBTLjoilOIrYFmV4Q+dwa95
|
||||
DWbxwHJZ9NxG8EvQ4N95B7OR578Matqwy6ZlgeM9kiErrDCWN9oIHGEG5HN4uCM6
|
||||
BoaxB/8GNCSj13Uj6kHLtfF2ulvMa1fOzUd7J+TDgC4SGkKaFewmlOCuDf1zPdEe
|
||||
pimtD4rzqIB0MJFbaOT0IQKBgDBPjlb7IB3ooLdMQJUoXwP6iGa2gXHZioEjCv3b
|
||||
wihZ13e3i5UQEYuoRcH1RUd1wyYoBSKuQnsT2WwVZ1wlXSYaELAbQgaI9NtfBA0G
|
||||
sqKjsKICg13vSECPiEgQ4Rin3vLra4MR6c/7d6Y2+RbMhtWPQYrkm/+2Y4XDCqo4
|
||||
rGK1AoGAOFZ3RVhuwXzFdKNe32LM1wm1eZ7waxjI4bQS2xUN/3C/uWS7A3LaSlc3
|
||||
eRG3DaVpez4DQVupZDHMgxJUYqqKynUj6GD1YiaxGROj3TYCu6e7OxyhalhCllSu
|
||||
w/X5M802XqzLjeec5zHoZDfknnAkgR9MsxZYmZPFaDyL6GOKUB8=
|
||||
-----END RSA PRIVATE KEY-----
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCt/emmgi9P0R6r
|
||||
834qiAWJGgewDH1bp5OpmoUgMfnTKURqGYR6kFXwxTRKcJc3w61kiElVo9SFowUR
|
||||
TlQUJzTfa+XJe0x+A1F9RiRkgPpd7WfOCoRhF06m5zq1g1OLVAV5N+sYItZs713j
|
||||
zhhrd69siTCYG60czh6hTwhIvB95WLJ2gJVSL9FZHwDYj+9B/2WsdDo9+g7844po
|
||||
r52pQBCPBs9ctxJnpYTZwRJHqhr6VLgFNLPzQtfyuGW9H+eceTstnQNBb7+BO7cy
|
||||
+iWO6ix+LQFHsYIR+QFVer7jL2olR+FHGV4smG/sw1t9HDMo5SKZH/2aPsFE8Ggg
|
||||
/EZzo3F3AgMBAAECggEBAJl8VSJq03uwuULs0I6KYX+5c7Csw+UXph5GzktBauYJ
|
||||
EG4cRo6sRvPXszI7sloiBxKZWz/t/ytITM8gbvE8SNsS3qlnWGoNWVSOjMoQMUys
|
||||
ghbzliXkHqEDd/npeMh+/TkayAwJp2h/nC66fzZGZXz0ZOa5+3NerQyVoHQxZ/5t
|
||||
eCJktp0KkDlO1nnhRGFcI+wkNJnRHvV8w6+2z3Z7Pr+RX4zZXtmDLPQnJA4HAhhP
|
||||
lObkMWNz+tgEfnv57qnw3AEYmCt6Vb+6dQs/RXfrymNYnvwJCU2fwfKUG2Hj9l6K
|
||||
igi553qQ6CEfDBcgNDoFA5LW0Sl5/EL8YmW13hww+VECgYEA4LNA/XRkeq7/w/Vv
|
||||
0Ueq1LN3Ft7e29hJP3+I7LU6r4RN2omzbh26FpeSecKDh69rfBpHp7smsG93AiXz
|
||||
Wrppbx5wHT+UdWS8M4Kyc07Aen8kHanfMm3EnHc7kbFCp8C9x8Ohx2CVghaAwdGP
|
||||
uA1Ok3+fWkzgzSh5rwiQZ+u6Ji8CgYEAxjppsf46p5RIUoquKfvniF4csZi5vAna
|
||||
nS6iW4nzy0/coNu09fFgOA639B5RPUD51X1qOl/geLSwW9Eyll5YsTZLc3LGSmPX
|
||||
ncVvnLdREFuYOWqSKiH71O2F/Gk2Ulw+/z68I5uCU3KflZmWUqttsJAtbha7mp2X
|
||||
nzQ8wWFy3zkCgYAWs2VkdcbU8isaxAgI76GdnYfxgpCnS0GTLbJKn8CPDOHEq1a6
|
||||
UDyFnciJjpI1Og602f7VPj4ZYxl12Rmt19lhBYU0H0FDXur4lUKsDY+6kG+4o3nO
|
||||
IXLUj+Mvw+XFBt7leKk2eFaCbv2RFzfa01VV53XU7KFleKpaeZYFObZztwKBgDX0
|
||||
RDyMRA7Ez4KX+Ju40uZNJ2Y3eaZy0CwkO1DLFa5rCFRIONMQZe4cI3mwntAbVWUE
|
||||
g9trqT9xfuce47QF7gIeEx9TSCGLV268BIsVYl0fcSl/llUddtwX4nY3dO+p6AO1
|
||||
YaQXebuY4VLowP+zZWG/fH4QdfO5nUFaCIdiLNARAoGAJa/HSJ+N9NriGnXelyzR
|
||||
FwH5nxrZhOMr0VeULNfV7Nng3AgmPz1MKBqaCahinOrczyC10N9priEjM2o3rzTW
|
||||
orkMXEFn9/63TZF2RlJ/ps4hM91JVaQsYO62LbEwlwqapvSSkOVcLPDJ+hcljueU
|
||||
VhRkheUJSAIEEXWsJtFA0KQ=
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDDDCCAfSgAwIBAgIJAI1YpPACcsQnMA0GCSqGSIb3DQEBCwUAMB4xHDAaBgNV
|
||||
BAMME0ZPTy5XV1cuU05JVEVTVC5DT00wHhcNMTgxMDI5MTU1NDI4WhcNMjgxMDI2
|
||||
MTU1NDI4WjAeMRwwGgYDVQQDDBNGT08uV1dXLlNOSVRFU1QuQ09NMIIBIjANBgkq
|
||||
hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxyWr+1O/tf4yjwhfp3/SDGT5fD0chhGs
|
||||
Qc+QM7Ewb5SOmIL5UskxT5pCKc6Kuie5qqEp9xH8Rrfo18iEJQPdhFC1YkaBEI0L
|
||||
l1qvN4jmXzAK/E/u4+X+FFprHyruXCmuXqsWQt/qEOqU1ciN47GE9+ZW4R+q70uB
|
||||
zrEQ+dzN7IBsyf1lzzS3/TwDgj085QmiZYxKxX40d5hZW6AHxPEKJa2p+Gweqg74
|
||||
SpzBWL1DYQLcqHUuMKlbigHg+gleqcO8NiHT5UdeSPVokD5VJPO1La1PMqkLmJYr
|
||||
3vVkQ9YzNQ615bX98VMIi17cmE7LE+vz+v287cdFT2f1pNXr3pCGzQIDAQABo00w
|
||||
SzALBgNVHQ8EBAMCBDAwCQYDVR0TBAIwADATBgNVHSUEDDAKBggrBgEFBQcDATAc
|
||||
BgNVHREEFTATghEqLldXVy5TTklURVNULkNPTTANBgkqhkiG9w0BAQsFAAOCAQEA
|
||||
HJyMCj9oHwECmSGWHnYHkO42zeyj24RKlhNG5skUCqZmpmeDc2BRMYH4fjP75MD2
|
||||
kuasZBMAxyQnur/DEn8TzQ1mlKxYCqoza1ql5PkfcwNUp/tvQ7Jhf45Z5mQVeUM7
|
||||
RSiBhpeetjHY5/xQb7gXHa97+OjDoRJ6NL/dzGxqypf37kiQPw2jWI5RTFBkP+h/
|
||||
sPbeAZJjmiEzvw31SAw9IGj3VvIwcuTxbsdJQITU7hCXDSd1EIocmzAoobY7WRcT
|
||||
B1pLmHlP/BaIsM7m0NF/HgUsgo/kgSsxnGA2MHMYQiTImR2DUgrJYzKlJ5acscLK
|
||||
sMq9xUnjr6KF1C15R2FpDw==
|
||||
MIIDOzCCAiOgAwIBAgIUQsQetefGQudD3Bl0iMjMDDzXXnkwDQYJKoZIhvcNAQEL
|
||||
BQAwHjEcMBoGA1UEAwwTRk9PLldXVy5TTklURVNULkNPTTAeFw0yMTAyMjQxNDQ1
|
||||
MTVaFw0zMTAyMjIxNDQ1MTVaMB4xHDAaBgNVBAMME0ZPTy5XV1cuU05JVEVTVC5D
|
||||
T00wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQChildqIUA28A9pNwFf
|
||||
X6/Zb/t5pcbm0v/8J7ITOw7qOo0qH6meSxunT4xg5sJHYo+RyriDX9vUO3vK3HLS
|
||||
2KGu8FPcGalvzPeUd7sCdQAgH+/UjBS6jxMKZremhq+gL/Vtew90QxA/F/Ftqdn1
|
||||
gcxftcXOkuwJga9Z9DcO9N7NTkVhIo16yrKdksPYJikDIQcn8rIuHy08vrWSDCg2
|
||||
uKYXNQN/3XpZ0ru/yk/QtswhsGCg/Rq0453Dk45TorBfBjKMBfbEBGinHne1mgOq
|
||||
nP4BHLQrhUFtaf48YsUH0mMnhZyIf6XPBLnTUCjsu8cTlfLhNE/lezDqQFsZJmLq
|
||||
CCPLAgMBAAGjcTBvMB0GA1UdDgQWBBRqf55YRhG0NFigWohI8hztByMkJzAfBgNV
|
||||
HSMEGDAWgBRqf55YRhG0NFigWohI8hztByMkJzAPBgNVHRMBAf8EBTADAQH/MBwG
|
||||
A1UdEQQVMBOCESouV1dXLlNOSVRFU1QuQ09NMA0GCSqGSIb3DQEBCwUAA4IBAQCX
|
||||
9k6GO/UzPVVfaFdKtnWhb8Km568g2cVmb+5NGwfCImp/9d2cqbYemZwnxBUZOPe+
|
||||
nkS2gK0e4OHWMtKTj+AGIlflrpUQFHX/oHhxIFERU32QGBooYmLRu/f3Jdx/4KiH
|
||||
DGSoMKdE4FPslvBlo8SJyBwil+Gpab0PUEsx4xAtoqK7Rvj1Y/eNrNv0NpoHfWUV
|
||||
Y+AjB8VEo0KN4g7k7dKq/mwVSPHVbkm/7eaLmOrcJ8XwSkOKRmw0y7Nvhe5WqoBz
|
||||
1zUMszzXp7ploaDrgU7GPHpgrcb301iJcLp4p7ikEaSV193EoXSt+cxrx/rBzOY7
|
||||
NkTkmMUnc8piUpzi49NQ
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,28 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDHJav7U7+1/jKP
|
||||
CF+nf9IMZPl8PRyGEaxBz5AzsTBvlI6YgvlSyTFPmkIpzoq6J7mqoSn3EfxGt+jX
|
||||
yIQlA92EULViRoEQjQuXWq83iOZfMAr8T+7j5f4UWmsfKu5cKa5eqxZC3+oQ6pTV
|
||||
yI3jsYT35lbhH6rvS4HOsRD53M3sgGzJ/WXPNLf9PAOCPTzlCaJljErFfjR3mFlb
|
||||
oAfE8Qolran4bB6qDvhKnMFYvUNhAtyodS4wqVuKAeD6CV6pw7w2IdPlR15I9WiQ
|
||||
PlUk87UtrU8yqQuYlive9WRD1jM1DrXltf3xUwiLXtyYTssT6/P6/bztx0VPZ/Wk
|
||||
1evekIbNAgMBAAECggEAVOFEnTmD47D1oasjAgRj5a5/+6kcaDROJDqwrqeeCmDa
|
||||
KjzgwZ1JLDGGc8U5scBOzWAlv83lpcqrLpWjZRdxqfywYrPEPOaxAxC+z7/E2Ntk
|
||||
Q0hafL5BfjFPqRgmQhft3yGyukwvuogRadEyUNMP5o1BiHBz7cxUBmHH54dqKZuO
|
||||
ueUMgqraJX/GK+Om2rIUst0oOT9yUED+f6ciIjVAmCx1EVxZmX7sxKig10e70eOJ
|
||||
rfHlRguJWtxy0+Wl8R8TVrpI5r7qsE8y2fet9RqFOof/4ds8uA2nlZ3NpGkAq3Oo
|
||||
+65h/2fjD5uQ7jmT+XZcbC7SGhboV42zIrmn0DyNIQKBgQDneeqzMlooNzLD6x+v
|
||||
bXo6BJAHXuml440zS5i5RawKc3+/GxGQjBvnfhFH6AQ7cL4ohYyfuAo4srgifRle
|
||||
x3Gl8yvFf0uLaQHj811HPWV0fU8bwekI77jmH7WZi2ED/qX7X06R2vvUPGshvJi5
|
||||
yPCmJpDQQA6wmxBG1U4SqNw0xQKBgQDcPu2DMAJpbMWWeb5xWv5/6h6TUF4tV7fV
|
||||
eIBWuVfe9Jry3gAnb6YUOKYmA5xYJJ+fTz4Nhe4+LQbFS1esT/7ZIATvILogZc3S
|
||||
X9+ZCYG/tmDDZvhZqIWWSzzdrjb7dseP1RI4Wp6OnRqHWErrkfzDJKuN15qgW5vR
|
||||
FUR2ykV6aQKBgQCv5ZQ00dly3+ciu+QbAb00o0zzXOt91Lnytcp7V3dRhc0YYrBp
|
||||
QB7gPYtSMfwtUxIdZsaihE64IQ8NnjSOMk6pRW0Iqh+083mtR7ylKwGSkLpxpFu6
|
||||
H7hInuX3pNN3HqXwq87fxSFCeRsLyu3fl9NO3tWCenrvNxYaTXMDeO/E5QKBgE7D
|
||||
XlMU/zfOg1bN0PJe1TbPdgG+sv9KKF76CgN5otgD58nE5I812VHP9HMRxX6sEj15
|
||||
rDpP1CR+G7bAu+jObtgdIEaYEJf3cES0rpTfFnyF71LR5yzBHIzj+S9Z1yXUk4d3
|
||||
bl2i4qMjwdH3HEvkWF09JvDB0vVX7YA3N9W3fmNJAoGBALRi9EbkEBW1vMPwMzps
|
||||
YoJ1lp/YyDGTFcg6KFgTfNaOYccb6EXL2Cd21qvDsJw6wthXS+cSqX3qlTLAVLY8
|
||||
az/NfyFmW1fUtGjs2s0ZtplStGBhv8VR+2fpt9fgDOOrGYiN2dtmPm7jCAmyQQq7
|
||||
JCg7Vq6f0q95DUwiUAo24CBn
|
||||
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQChildqIUA28A9p
|
||||
NwFfX6/Zb/t5pcbm0v/8J7ITOw7qOo0qH6meSxunT4xg5sJHYo+RyriDX9vUO3vK
|
||||
3HLS2KGu8FPcGalvzPeUd7sCdQAgH+/UjBS6jxMKZremhq+gL/Vtew90QxA/F/Ft
|
||||
qdn1gcxftcXOkuwJga9Z9DcO9N7NTkVhIo16yrKdksPYJikDIQcn8rIuHy08vrWS
|
||||
DCg2uKYXNQN/3XpZ0ru/yk/QtswhsGCg/Rq0453Dk45TorBfBjKMBfbEBGinHne1
|
||||
mgOqnP4BHLQrhUFtaf48YsUH0mMnhZyIf6XPBLnTUCjsu8cTlfLhNE/lezDqQFsZ
|
||||
JmLqCCPLAgMBAAECggEAAddc1bXZUIb99foNhvPFQlo3aJSOJtVi25HsQrHaoqrW
|
||||
LxO7UDWU9BxBAl2++ydRcZz4625tSAcBizdmC6cyeJb+rbSRLNgOPl/6/rUssjEK
|
||||
/8Y0+kdlNSokj75bRB0n0/6iF3YoOj/gnZphBJ5dIP4magtcq2MSB5l0AzShUX+9
|
||||
QdC16c6Tf4anDxtKEa/Dz4LsG52bQeoSBCWZu+sNXIJD0LcqYkSU9SauKswg+ROZ
|
||||
UL/dH+ghZcmCVaEUKKThDGIsMWY0xNyrHv+7JXhGRivQWEWRkGy0I7SoFjoMrdHT
|
||||
GIkz/17vATNgXXl4opEap9cxHvoAQGBnUH2fM8+KMQKBgQDOJJhwbXtO9Ew3aCF7
|
||||
lqZxp3Yt1+FPdByXFM1zTl0c5WTsi7PuhJvMa/vd8kbA24yXeiQMKaEJTvrk5sHt
|
||||
XbkIncEu74bphMSQAfVjhVvGG9+2exzeGPoSSo8R/N+mvdfNDMLgy76JysixrD4t
|
||||
8z5vSDHk6vFk6a5AEbYrBd+CiQKBgQDInCo/1JifmXWkKqwzFiyeqQRbtqOxZXq7
|
||||
X4+gKupZBhMmChoXKjeLAa522IKg9x8yPhchIO8y0CFb7UAbLM/W6AY1/Bts8pC+
|
||||
WThIAHDxJUs1LYZCbrvPBGciHm+DOd7YnOCSGIugvKkzGtlnw470tYGTBG9UnJ6M
|
||||
gBeQKSFuswKBgHDMe5GBhujEKvRiiXfMDpXAUZ1yNHq6iW5uVqGSrsbCC0CYjUOl
|
||||
0m7AY9il2utq93pnvlPdb53dVtJsJo+RKk+5qtzY+pTLLpeySqlDbt1voTGEGl65
|
||||
ghUCMVlhaqwd03kDU5uYQVG64ai/HGMeZ/qSoaKmoKJccBnk1y6hjxUpAoGACAK8
|
||||
aSobIhxlpzWRBMU46OkE5IiphrbvrLXHLXUpQH7OmvFCU3R7HVDm35YLuVPE6iOy
|
||||
lr8S7iuDeYFC0xL6BbHkNZmomrQg9Yjoh2+FEwwTMb9sohB77WtWlvUyLMJN1MaG
|
||||
7S6PvM5rzz+eONeFSN5Km01NCMrsRd4Dkt0hmaMCgYAfXSlXFUEP5cA+/Ti5QdvJ
|
||||
e+Q56PsKxrRUnh60vqxZckb+RzRlDc5LmLIda63feMJF2cGpwammvSneg/G7oNac
|
||||
dQ+LpqYNjtsuwb7a6QPdl8fR5eHCz0HFJXmJzKcnzKvOE4EwGwKsSrHnAzGu0MUH
|
||||
/I/PgPrWCLDOOYFQHNB5fw==
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -1,29 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIE4DCCAsgCCQCBCSnAJ0he3jANBgkqhkiG9w0BAQsFADAyMQswCQYDVQQGEwJV
|
||||
UzELMAkGA1UECAwCQUwxFjAUBgNVBAMMDSouc25pdGVzdC5jb20wHhcNMTgwNjE5
|
||||
MjAyMTEzWhcNMTgwNzE5MjAyMTEzWjAyMQswCQYDVQQGEwJVUzELMAkGA1UECAwC
|
||||
QUwxFjAUBgNVBAMMDSouc25pdGVzdC5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4IC
|
||||
DwAwggIKAoICAQDOuys7ZjGVxwY/Rp5OMECkYNTOfZ1CEyAL0+pod5cd8et7k0cc
|
||||
T+tEP/25rNR9N0d3/AmtPX3XTy1MA05bYGD07cD+8meANJ+rLGMfcWh8QbBCFAWZ
|
||||
+zWkOSwpwW/DvI+67FvxHNa04u3Wv2qUld6qb0mSuZi9hKQ2s6/L3o/SxtDL/G4N
|
||||
rW71ZCkIwfzkIqvh6KWYQCZvOAIF+BFVZ2UgLbRD7/RYDrIIOrNAkW8O26C7Tck/
|
||||
JhDHDCOmfNkYqfhUW+D+GgoCi38uJqngZyxypscKdaA6SM2oFoo1jCShxDrOhvJU
|
||||
rE/l+T2Xtyr+FupJUv93iowibUwHWR5YwRNYOPkdDSG3oSKxz5xzi7Qa8L2fI7wo
|
||||
A50TDVh2AvmMCUufd5adS70bLYBfxdFNmnUhH+LHbg4v83K1eR4xMiWjpvLZ6Oub
|
||||
ufVJF6s5QEw+3K3s31UPbjzam073afSMLBpfHOsbwvcb1MBYWvf0intQo3a8MvYZ
|
||||
DCj3Y7W1Vw8lbn4v1N37KSLSNMMX1SyKxK6386t/AHwFuCM9ygI6f/l/XERL1B61
|
||||
qj9rZngKOo2cW3Yjj+oUETF3nHmcCwKBYTiWLswBI3fg6oFHTMocypY3eKhiyVaU
|
||||
mf9kBMgkDGUjWrAfOEuW9jCaDnag+Yy4XUXOlc/XaT9M2Ajvpfh9gYxWIQIDAQAB
|
||||
MA0GCSqGSIb3DQEBCwUAA4ICAQC3ut8Qeq3pYt/OGTATaUcYxrfqezW5hJN6bVfr
|
||||
/+UN+B0DfGd1/gKRUmb/t3RtmeMctTW6F21c8jyXBObjOhYVV6aE6iF61Uopozux
|
||||
+VZq8H3VJ5Qiu/Yfb5dh0iGf9srREeSAkUHBuJ9qAosM6iJsoaxQuhw/yDSxrhhg
|
||||
3jS850EZYEt9ZFjz3IdSnPCiLYqu+wMOCfT2sqBD3S8JCohTdpzuvKI2KaaY5drW
|
||||
NK4mrpJIjucfZIqbA1hbd/lCqzI4jW6i86GFhoikxxWbJCEuSWOiLJtxVnOvQgxi
|
||||
qOnOIMx88ivIxrZUHTvy2ncv/RH0q5qsaQddPkY+ll1E+1T7L7CeMTAMANS2vdlH
|
||||
nwJgsQqowisLYJQ4ztsbvpZung2szwx4ImASICYF5aVkbuNJd+lRVUoHEfSYNIYM
|
||||
Rtjteu48lYFzoBMwl6TFJA2yvL1LNaTE4/zTDgGx21aDHK14J3eIG+05wZE8AXkC
|
||||
lNGsY6n2Fn6yLK9nLxcOkpGyY62ndZwDEezqr0liOz+CKBeSZwk+VFxcgE3uGo+r
|
||||
DUcfLaU7Lx5KovP1DYAKJR3caSPAIVPnQth2kunCNs4kD7370JmmnvTlS7CTeUBT
|
||||
1P7wLh3QMrq826lGtLXNMasR1w+Q6jVx7HoOD2HFRbJHFv10R/GuWpAWAD8X5m92
|
||||
a/HFDg==
|
||||
MIIDKzCCAhOgAwIBAgIUe45njH+1bJlmvavGuNmvri/RHsowDQYJKoZIhvcNAQEL
|
||||
BQAwGDEWMBQGA1UEAwwNKi5zbml0ZXN0LmNvbTAeFw0yMTAyMjQxNDQ1MTVaFw0z
|
||||
MTAyMjIxNDQ1MTVaMBgxFjAUBgNVBAMMDSouc25pdGVzdC5jb20wggEiMA0GCSqG
|
||||
SIb3DQEBAQUAA4IBDwAwggEKAoIBAQC5CNhN057KK14UDmwXKWUENOA8rTCXo5jC
|
||||
wd8w5nWdrMeObNJVWAcTCJ9w/JmXq+wxoziEmqOvkgFjYNnhBCnA2oz+5d7NkCh1
|
||||
NJx4uHekUwE8EYwqbz3SjcsH9w6hQL7TJ4IECNXAyLYuXS0oOFMYOKQRzdoL3KBU
|
||||
wcsoPq6f7/q/kWm8jvRaMfTIWO3p1B9h/qAmVQBHmwtexrT15R+xahkJ1WuPDLdG
|
||||
XMwOwsS1w2SoXcSUb5aZl+01ubxdyte9NpJgZlbnsSu16beiln9BvQdG3BbkpBUl
|
||||
1ZR902FaPzDOCNRrHv2G1kMHHobH/3QFhnF3sS9z6UoYb3Kz4L9zAgMBAAGjbTBr
|
||||
MB0GA1UdDgQWBBTL54RZT81YeRmsiXWID7u7yDv+CDAfBgNVHSMEGDAWgBTL54RZ
|
||||
T81YeRmsiXWID7u7yDv+CDAPBgNVHRMBAf8EBTADAQH/MBgGA1UdEQQRMA+CDSou
|
||||
c25pdGVzdC5jb20wDQYJKoZIhvcNAQELBQADggEBAHEkkHJUcAce450kbTZbXTN1
|
||||
JRRSqszLASTPKiDL2vKv5Podg2LnrpDsn6Ir0aNPKR1LUPbOdL75UZCeq7yUyL8T
|
||||
C2ihUkjk/ND9qsgYVPJYvux3Z7qP+5tJv061Mo+mWBTFoEkNtlprNMTOtQ6OsvUd
|
||||
eCdBzSu8BSbFVFSpCAYleGNOLjsabTP2vmgVKCLKgM1mytVnlVEov0QIUvjN0tNM
|
||||
t84MGFOJgG5tLButTCUquBoBiq0W8p0HySMp8i9QvPzmsnXmFLYBlYT69+BlzwEN
|
||||
vM27r5o5Rm3nN2g2W9aDKUXl6WZowhDLdRWYbhYzX5OJ9KM3uwoCAo3ArJkkzf4=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,52 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQDOuys7ZjGVxwY/
|
||||
Rp5OMECkYNTOfZ1CEyAL0+pod5cd8et7k0ccT+tEP/25rNR9N0d3/AmtPX3XTy1M
|
||||
A05bYGD07cD+8meANJ+rLGMfcWh8QbBCFAWZ+zWkOSwpwW/DvI+67FvxHNa04u3W
|
||||
v2qUld6qb0mSuZi9hKQ2s6/L3o/SxtDL/G4NrW71ZCkIwfzkIqvh6KWYQCZvOAIF
|
||||
+BFVZ2UgLbRD7/RYDrIIOrNAkW8O26C7Tck/JhDHDCOmfNkYqfhUW+D+GgoCi38u
|
||||
JqngZyxypscKdaA6SM2oFoo1jCShxDrOhvJUrE/l+T2Xtyr+FupJUv93iowibUwH
|
||||
WR5YwRNYOPkdDSG3oSKxz5xzi7Qa8L2fI7woA50TDVh2AvmMCUufd5adS70bLYBf
|
||||
xdFNmnUhH+LHbg4v83K1eR4xMiWjpvLZ6OubufVJF6s5QEw+3K3s31UPbjzam073
|
||||
afSMLBpfHOsbwvcb1MBYWvf0intQo3a8MvYZDCj3Y7W1Vw8lbn4v1N37KSLSNMMX
|
||||
1SyKxK6386t/AHwFuCM9ygI6f/l/XERL1B61qj9rZngKOo2cW3Yjj+oUETF3nHmc
|
||||
CwKBYTiWLswBI3fg6oFHTMocypY3eKhiyVaUmf9kBMgkDGUjWrAfOEuW9jCaDnag
|
||||
+Yy4XUXOlc/XaT9M2Ajvpfh9gYxWIQIDAQABAoICAHEDax/evw6lLaobveD6iewS
|
||||
r2Nu0jBT6jntEIEpl2gcX2I/4ij9G51E6jy92a/WL3DNTLDzI783noimagiUCIz9
|
||||
CHuXIrO4kOzvqASBZ+A9vNByx5kk9m8ffiAZijLT+zLxkVWfMVTTlbfHDsnJoF9F
|
||||
1U+rvG8meusYker+cVuFqpFJHxTFEhp+Ndx+x/QjbBlkqFox/5DfamO++CLbEjJk
|
||||
Kd7V55rX9cV/6YxLtQ3HTPf4DyNBePyHi1mxeLD+Ai6Dx9zBeWVoww8EvetaG7dV
|
||||
qwvxv7T9JchVAhtB0KjKcGeE6CcXx9ntxhkRXiRnfI63G8dK607KtzxxIKDec+bU
|
||||
O0A9F3DCU1qQcNsHhKButgN3SAKu8lERTpa1Y/Wu9YOmXRwexRtS8D2ktFjYyERJ
|
||||
NUkU1WST707avYxNi5SfVr2tCpMtiERzqVdsgExBoQcliJmtb2r3qhz4TI0Q6MjT
|
||||
R1icUYfQv4+xzO0TMP3+8DLWxg2t7f3082b2ig29N6z/jD67U6jzc8hxwrPqvq2b
|
||||
ubD7YcIfRWwRbaieypEymtqTW7uc+Qs6z4brC8hTdAjOlOn8HN92gN8E9ilpyjam
|
||||
QZQpMD5OSeF0cDfrgMkXvcv5xrHUjfWf0KSMYqVDz2mp3101WExKpMiBv9dHJmVm
|
||||
XsCa5UW5o4CGK4SM6LmNAoIBAQDoyymGgFL+DMvp1dlpakZxz4mE/+qK03/YKaOU
|
||||
TSY/g1szwnB1z+cKybsCdRWfqQVq2N23dOq/3Afu2hs4F71ZbNZpRn2JBuH8wa7S
|
||||
V6K8N95He/zjr/lz3p6qOxcmG1m84HDJEJPE4aNcq/qpBSQ/hzc8YGdNxthBiSQ4
|
||||
FgJMCnnyOYAwYZvOQqdGI/LTGh+vr/CaEn5Zco9CZPLuOGSTtvblKKY1zSwttg4/
|
||||
ZLb2ebq9HK9zoD0IDMx7zfPC+P/MhHPGR2HHPSHh653k7TRjpD3u30DNQDF0YWvK
|
||||
uYgLI1MReofShBA8I/rwEPe8nDn6KLmv3KoLt6M6Ied6YLEPAoIBAQDjVuYC67FL
|
||||
i7wDLSLLjWBYGTS6r1XwKppc73wQv7YTNvPUWrhxvRXlTv6laBVUmbmllDuDdmXI
|
||||
TOyQB4rKN2KIv+Cdt+itmEAMcbfVM7wIIP32MOyZP3D/nd95MfSS06+M/D30DCFA
|
||||
U+Oi4XA3NN6reXbASYjt0wNsgXDuYHrZpOB00LHEHIWvfLtf9VMWQPgVSPU1T4Bf
|
||||
0LSKRkE6Zl1ZY9RoH9U25APuCEpR1+SBusMqhZdtNTogfrEtmrOs15FoRdzm1E9G
|
||||
E9Zt7C06A6tTN9YOcckIjHMCrwPKdiAgiQVy7gThbMZk2qHuhz38xtgmAlBLhl9+
|
||||
6pwwMA5j2iXPAoIBAQCHqV2ZtE6pHmv26Vi5xeUnjfpmN31HSdnG7v0U/6C6gqIz
|
||||
l6xR+8Z40vbYh8MCOE2f5qHOt6PWCzPUTeZu2ebOpk6NKzcdE5W+5mAq1EdRyH0Q
|
||||
y4Ckb3i/vYxZR/ZFjsrM9z7C7ZYvtg6tgsuglA57tyDJXqTU/nwoNPOWe7z681/9
|
||||
eOTrTPavTMiOZ4Sq4R52E+Hy57QaDFjQKGQpz1NNgeJ/ySCTWe3U9bN33gmBuY7J
|
||||
hl340/i9KDhCLdNQXCs11Dpj4lVo9oc4UUbCkjlll+E/w3rQIgiv+dYHXfeaBgvy
|
||||
s6VTWQLdCVrDbB/zGlfvIKyVf9LY4TuONRPgjVihAoIBAATG0aRkEWCV+ghTDXUb
|
||||
blfLh8kYYATg0Ed9nKy5anjy4aKnmVKCd5BO3ZjaHACgDj+FYs67UR4pR5srHWZs
|
||||
TXy0E2Mc9x2Wolnglc07/gpprwxaMM5zf8tPJN/mBc6D9h9POXoEOzqfyJumgvYV
|
||||
/Uu7DJyzrtXYZi0Edzv6+PnTtgeeTu3g74olY8Z7YBiKmuvPkZ9iIT9iIjj5iutQ
|
||||
NUvohhD+AjvaBJ8eu3kGwT1ckDc3gVwBD0yZfN2Jb5cFHIAFX8PV2CiPyCSdHsIm
|
||||
S5Y/CRdamq+8S7pVtQ2u97PXTS8CA0Y9Q9ngoiBh5RKHlwkNaWR82UrQYSG+EL9W
|
||||
WQ8CggEAONeHx+9BeeIpu6jXjs2GqGuLgYbPSwoAo3StO5O+3Q1EgORv6n29xasv
|
||||
s+/IJBqhKeYFSNhXFvsyaacOMRwY8+vpr8FgKrytSlc86OEjGPXss6Zl7RuLvk/8
|
||||
S3wm593Lx3GLIfVIX+S2naurxq4Td9oDeKukjD7sKtOy8DhmLbLC48t5P/FoThZH
|
||||
PUqyLJ5XDf+pSV31Z2LqTwYdgKOqqTTJFvZLUzYZDL5Yd2nHrfvwF6H70zzuee0t
|
||||
Hp7QFDD14ZSMv+QOjkwenqyj1O87JJpPKH7NLRaaEk+gI7yttwavbxJYjFUWOrRB
|
||||
F6gCgvoJLFw+v5SAX5kx3hxj+QYH5w==
|
||||
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC5CNhN057KK14U
|
||||
DmwXKWUENOA8rTCXo5jCwd8w5nWdrMeObNJVWAcTCJ9w/JmXq+wxoziEmqOvkgFj
|
||||
YNnhBCnA2oz+5d7NkCh1NJx4uHekUwE8EYwqbz3SjcsH9w6hQL7TJ4IECNXAyLYu
|
||||
XS0oOFMYOKQRzdoL3KBUwcsoPq6f7/q/kWm8jvRaMfTIWO3p1B9h/qAmVQBHmwte
|
||||
xrT15R+xahkJ1WuPDLdGXMwOwsS1w2SoXcSUb5aZl+01ubxdyte9NpJgZlbnsSu1
|
||||
6beiln9BvQdG3BbkpBUl1ZR902FaPzDOCNRrHv2G1kMHHobH/3QFhnF3sS9z6UoY
|
||||
b3Kz4L9zAgMBAAECggEAYXLCEELaRtkOI/vwe0ZGuC9sxSflBoeXRUK7W9KTsOW0
|
||||
91Jid6Xzm9eXuPafoVxe+Ulf055phGadqovwU2xQ3Qt85CWF8LbyR7/chEuKXxWD
|
||||
+fG34wVRBtm8SsaXgt9JYIVXGO7aPDy4jbRuutbnFEOITQQfyM159wlVCC3y47Gm
|
||||
UD9zJxEjfuNzW9hC3G/y50179eIbCLZr1Ekyz9jpVyKRIo/0g5jYW+eP+kvqeWtf
|
||||
xRNKOhO/kMo1Azj+aEzlgok3zTRvDcxLIKpDIcUYxQBUtMsTS8oj2RIoGdn3g2hI
|
||||
B/3LuzpGyZ4GC1q+hq4LvJPEqdnXag+vQZNidzGVKQKBgQDsgFyDQNQ6kiOtWo8a
|
||||
ZFw1w1eOEbIxh2kTLQ+BK6Wqp9homMuMalFFlxK1ymfqV128aBMqGv4m7fGSamTr
|
||||
YuQKRjxNV+pcrzq+b0/a9gavSo+K+xvKPJdPcnzhzQrlHYaCDU/x96//0EvMJH4a
|
||||
O6N92JHzGknXeElgBRt04IryNQKBgQDISjcfWoGWf4/Q9DIidqK8dni0YMs9GIWK
|
||||
fZWneUFtwOPUyqiFqPBQMOzqKKVBWim/iZJS/lAGZWeMZXrpwiEM2u6XuvOzpyMx
|
||||
b2oLsrGo69+SSV1PhTsfy0nqHkQUoBySiUnQ6rMtEHtKpacMYB6ekWpbPY74maT+
|
||||
IRqv3EegBwKBgEm5oh1QglhX4kNSLbYefxhLcunY3mHOQpU1wNWVk6lldXL13s5A
|
||||
/mDhPXXqSQamgZQQJZlEnOHp3xqzjA5M36XMS1QZCfgmr/tZm904QNE4PxMmQH2E
|
||||
8e5UVFXXo4PU9m9oUvmtKvJmLprO7XikCs1aS/UqxsYiu0zxiqOes/HZAoGAKefd
|
||||
QKIfGu31mTJ+itdnlSb0ekkGPPI0fpVoCnJETkJdaNg+Cjkejqit6RA4A0RqF7kg
|
||||
9FxhQfVVhPp2jRIg+B+WbAihlKUTp/Q3q9rncQBo9qKC83Bcvno6LNHITMhzMfWn
|
||||
jB6O/o7EPQoLuXsi7Opf5lcq4rjC3KjwfHsKQWECgYA0VoFmvoeRzW6XY6UXRlTT
|
||||
u1jAo9q/Dn7vrhP/cIuI+xhQTtb1h9BAMh/549XT83Cuy2rdKCsd8uVfI6Ur1qYc
|
||||
vXm8HpuuPkk23r8XoFw7Tk7sxQTwFSkF7782Kq3sJbklILFByMZLugIGAz4jBmf5
|
||||
LcrCdYVLaJl9dux87RvQRg==
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDMDCCAhgCCQC425NNs+WWZzANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJG
|
||||
UjELMAkGA1UECAwCTFkxDTALBgNVBAcMBEx5b24xEzARBgNVBAoMCkNvbnRhaW5v
|
||||
dXMxGjAYBgNVBAMMESoud3d3LnNuaXRlc3QuY29tMB4XDTE4MTAyMjE0MjcxNFoX
|
||||
DTI4MTAxOTE0MjcxNFowWjELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAkxZMQ0wCwYD
|
||||
VQQHDARMeW9uMRMwEQYDVQQKDApDb250YWlub3VzMRowGAYDVQQDDBEqLnd3dy5z
|
||||
bml0ZXN0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwFbc/I
|
||||
gpOVNoefnIQrAy2wqK3VKSjFT5z5E8MVrHSU9PpC8bGQb0hTULmfHSzRTsajRLjv
|
||||
rLM/EZDrJL+PQHcCG+XVYbqMmVis4qsevuOyFdFdfe66LIsV+zmsSUbMyssGS2Qw
|
||||
AZx2D8RDtY35VcSA845gjQH+KfF1ST4s/73sr8ID5ZEEn4J6fbmrVfbxhygsx036
|
||||
VNw8OKby+7Gx3irz1ZC6JZ6jmzqlsu4EuDY1cjHCZSUD/JQ1jHz3gIRLV9OiglN/
|
||||
PAPu8zZZ/vtalEGytpLUcbjmvNg24Yc94vd3W3r4Ne13FhDLnB3w8Gz4pYZsEgkk
|
||||
18LzttWcqHnNwg8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAW1XJBk7oCGkzF4nR
|
||||
0l2cEpG2QkHAUuXRa4PqH9QALUj2taAZHGiFF0UsknjbCnTsX6rzSLy1NFiJxyuO
|
||||
CmaiZ9Y9mcYw+T+SXo862Yu1Jch48LoD5x1vW/F8ZT+Fnl+gXoh7ssAtjQ4YViWy
|
||||
Z3A1y54Mb6JhuVjfOBuzbGwI9DDAetKZgTVY7SCm7MTrF5z/YMly5rixV5th1XCj
|
||||
4bqZ9p4CZyP++Y4RffKuCf35cyD/9Y7Boq5A3E8LoxMRFzszyn9RhKdkKLOevGgc
|
||||
r4H/w92uaQqQGRTxQfNWfphBdNuc+ZgXYIGiexcpqxJfA0Ei7XSsKVxxXNxLoJe5
|
||||
3xs+Lg==
|
||||
MIIDNzCCAh+gAwIBAgIUT3bjHgL4cSBhQYJD3ZLnzG+nM98wDQYJKoZIhvcNAQEL
|
||||
BQAwHDEaMBgGA1UEAwwRKi53d3cuc25pdGVzdC5jb20wHhcNMjEwMjI0MTQ0NTE1
|
||||
WhcNMzEwMjIyMTQ0NTE1WjAcMRowGAYDVQQDDBEqLnd3dy5zbml0ZXN0LmNvbTCC
|
||||
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALZEYWm29ug58BT3n+8bXQx0
|
||||
P4//Qp9n6nJsM0hfuACOz9Uwo4U7YalDADmeO1U37GLa+0yYMrk+Ihw9c5dIY+k/
|
||||
3U/yfeK8HOgJlpBBEG9trvuMPjCowtY9Nf27hX3p+BVj2y7iD9ePRPAldgVWq0u1
|
||||
XBVHarpMe7w39gwksDOUqqFMmP8HXmduPMel+U1W3PM1VsaS/USNuf88cpzhb1NL
|
||||
vLIwedAJSEg/23DG2B1jXWJBphdfLvVxbwzTgBxkumBoC7JrnbkxX4AuBbjeeV25
|
||||
vhhlSAZMmIc4lRsliJhkdZAU6dMgV0cmdvk8MH4x2vLZQ1TA1NpAMH51ZH0MmZMC
|
||||
AwEAAaNxMG8wHQYDVR0OBBYEFLroO1zbuMKx/mnAzvH8hboQBCxVMB8GA1UdIwQY
|
||||
MBaAFLroO1zbuMKx/mnAzvH8hboQBCxVMA8GA1UdEwEB/wQFMAMBAf8wHAYDVR0R
|
||||
BBUwE4IRKi53d3cuc25pdGVzdC5jb20wDQYJKoZIhvcNAQELBQADggEBAJWG90TJ
|
||||
vrSWV1jXDnb2sxSeBqDdB1XUYgk236EMghwLFblUGPNzli0iAkmaQEHE7Hbrmtx3
|
||||
4IHq4aIFToZ8sccUez5SCWdG0B+Rkhz4q8p8/HDxZ6a/d3EVhxtsakgJC8Mb6SpC
|
||||
TJBnVU9eNf0/D0LLMFqOrwvZW4JkLv4z1i+4VtCUtpzsba6YmHNXqY1rcFfuLnEk
|
||||
WlsqCX+jF2koiXJFUWEkbmLKf2tR2ri5ssZewDxnQxEIXNamMqUFhFY3g0vWKgVR
|
||||
xgybtnR4VCc2X7bfZbqQDLqt6D5X5Xr6wWNP99NwN8RMl2UhtAbKG9WSPH/byXF0
|
||||
4f7iRivBAkSqbNs=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,28 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDMBW3PyIKTlTaH
|
||||
n5yEKwMtsKit1SkoxU+c+RPDFax0lPT6QvGxkG9IU1C5nx0s0U7Go0S476yzPxGQ
|
||||
6yS/j0B3Ahvl1WG6jJlYrOKrHr7jshXRXX3uuiyLFfs5rElGzMrLBktkMAGcdg/E
|
||||
Q7WN+VXEgPOOYI0B/inxdUk+LP+97K/CA+WRBJ+Cen25q1X28YcoLMdN+lTcPDim
|
||||
8vuxsd4q89WQuiWeo5s6pbLuBLg2NXIxwmUlA/yUNYx894CES1fTooJTfzwD7vM2
|
||||
Wf77WpRBsraS1HG45rzYNuGHPeL3d1t6+DXtdxYQy5wd8PBs+KWGbBIJJNfC87bV
|
||||
nKh5zcIPAgMBAAECggEAW6lEwMmRAMVVDnHDXA4HC4wG/LJ8H3kmX5v4KPmf1XDm
|
||||
71kMRX5iwNfNuNenv+75uXy4722e5Zk8RyOeCwJNMCqeZhAMLEfmzVQ/MipKEPp9
|
||||
muaqIYs7X/GsQSkKcuinY7ecP5Lh5m2Uf9T7yKFwyyw0QI9YSsDqDzVmhqyo6aaT
|
||||
ob4Bua9mTOTMCjEaIk06SkS0Z5sCqtvKMMx/fI2XYSmxQvbwYPHInpyu2LQAvKTw
|
||||
wpwDLF4Zetw1Tutbk8TSTaoC2rn6ZH5DYdJ9pk55/+UqVPo8tu/M//8JN0t9GY1/
|
||||
aqJ25juHjj0pfp+0830NOs4n6symBcR4bSbDn7r/4QKBgQDnTOdo09jtzJimGlbH
|
||||
zEqYOi0NrWU/mLkpqbczjKqx8BnTyfF3FudhY7Gp2v1WX/ofjYS/P/2nY6sXKvig
|
||||
9htqLRCe0Tk9vavY3eSEyaHu9Tbeixx7lM4pQfHCASreMp37RyhIisSPkzdCChNb
|
||||
OuqYpTW4C2u9schMlmCVaWYtTQKBgQDhzsoIlWAAD//h2xqCGpcar0SzgPCHdUH6
|
||||
4ejVhmWPfy5Jlk1CwStlsO4BlcTW7ahN81GqIlyiqpi3O2JZ4HfdoZgKNdMK6YD5
|
||||
TkmXnABa42RrQtYHltvJCthctmjP7qoRxvDrDKLBY481AZjC1MNgPlpSrfALMibx
|
||||
wyd6rjQuywKBgH+nuAfo8866nnz+CGsY2wqNARSNYFXrKjZOTqgKuKKgCwEScUvy
|
||||
vhzH8uP10t/69Ia5ikwrOwlJPsH4m2PqsFK3MHcWrerfZZq5TEflKJRDjdbhHAUw
|
||||
qV+n34/dKRWdBggKy7bNr5I2A8dU3D37lEJO3AkJdJsrJYrva7rKgvP5AoGAXNer
|
||||
VfAk8qGhcfmmYowQSNZ7htqjCu75W+/6zaBerat7GqKDzcii0UL3+QrdTgmVQ8eh
|
||||
cjSCphdCh0QRYiba4fOJEdmjlj7/2oGH3KA1vSj1puxqF+C9KWIeJ7CQU74rivej
|
||||
IuGlIaKPxRmM976HPlEkzg3aPqA2Rv0YhGaP6hUCgYA3hEG6daHOj6/P+rR28wTp
|
||||
xyraym7/8BOVWLweUFVM7YKOKrLAa7lhd254Twy0wUvTgiIw/XamhiVmdSh80gI9
|
||||
hooqYern7WGoL9zU2spVaEe2AzhSRvTuLqlRRyLLnPC6uaGVeC+SYD7zIDB2cwyC
|
||||
bbvXmg15uPp02YpLtm8wyw==
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC2RGFptvboOfAU
|
||||
95/vG10MdD+P/0KfZ+pybDNIX7gAjs/VMKOFO2GpQwA5njtVN+xi2vtMmDK5PiIc
|
||||
PXOXSGPpP91P8n3ivBzoCZaQQRBvba77jD4wqMLWPTX9u4V96fgVY9su4g/Xj0Tw
|
||||
JXYFVqtLtVwVR2q6THu8N/YMJLAzlKqhTJj/B15nbjzHpflNVtzzNVbGkv1Ejbn/
|
||||
PHKc4W9TS7yyMHnQCUhIP9twxtgdY11iQaYXXy71cW8M04AcZLpgaAuya525MV+A
|
||||
LgW43nldub4YZUgGTJiHOJUbJYiYZHWQFOnTIFdHJnb5PDB+Mdry2UNUwNTaQDB+
|
||||
dWR9DJmTAgMBAAECggEADyVjiutfXTDjWHqzWaVGeDB4z7iAisqR2uxO3qbFnZZA
|
||||
gxDFpd6nYO9mZ2NUvuIkzJox482J+YbGQt6AaoPjeZCLDYezGlJTfJy/XEFr4jaV
|
||||
IeYnjNeZSUBQ6kLY9vuoV+6yWhUDCACp0+LpCcyH84SgoEwM/Go6pgtocN3jnoWK
|
||||
CVWWb5OX87w/y4BkZlaGvEdHYE16w/FXoBWB8x1n8zl/390pUt/ImnkIVQQMkwtL
|
||||
ro9AQH1Ho9rh5EGLyDXLJekLp+nZiJC3qGBrSiQhp671YHB7bDP4CpJK9UlFyJZz
|
||||
BwloxyhvbjTNuJKHK6UTjtUuOQvxlbt6JGKg/LUyoQKBgQDvXEHBgbnZm0MGKEH2
|
||||
jBWub6DKHVTHKBIV/BmzdCi6eQnDYkmwWCBMRslxPF0BZDbPbZc39XgIP0mfwvBt
|
||||
RuOlI84eTM19gI9g7KNmv66e4Siv7zbgYp4MuSqJpU9LtYpUxmG8U//XpBVf8fVh
|
||||
Sr49jCVJQu1e2hzQ8zSocZEc2QKBgQDC8BJHl+hJsb3Nnulr1AONVhOsLOqVL8Ay
|
||||
eGlxkXu8QAG17xzKB+MuGmSz0L2f3J+X7ZIi7vgpYnViFbxWiRdA34SzWGKMCSTZ
|
||||
9BcX8Wk2KK1lz2eSYi7FCLQwhYa+6fx2FSwusS3qN/lOzttE/gpDPv6TBQhEYFvJ
|
||||
mSF1GHKWSwKBgQCd5zyxzUSvm09x5mjtnqhG9PkbnJHq+DJGMpM54W1k5qhcLqDy
|
||||
JpDYAPKvMuUgH95QpLlXfyX6OtX1AQLe9B8FOEOnlX8SRhdQF4wMInOt+FeBCMwo
|
||||
5qXhOt4nA9j11wM0vdKIDNoAhgxbySDZWBd8PtHyLKavJjBQX8DjRlg2sQKBgD2z
|
||||
DKYaX4xLtJ6LSKg3X7RxZSKeyNTVW0jeMcmfWYGY5LG37+3GF1K1SeBcFlit1Nma
|
||||
eSGTb3pxzmNwK79b/r8qv+6f7/kQv9g4WMLcRwe8Uj2iKs77QoBco+Ojr6Vnm9+o
|
||||
Debtb3Htq3wVHvO0UH0+SAcylY5GzWZ+kEdmqIYHAoGBAN2cMo3xdqhjnNeGIqi6
|
||||
iX7yQaA6NR4Q1w7QIwlLCDKJxMDUtJ8oJWF77+a5GDKW9U2D3BclUlTOqTYqzGHY
|
||||
cQunblwEK1QBzYlWHAa5D1MLKJKhur7royGNnFTyHOV1pjl3mxlevliTgC8Oej/2
|
||||
WdUwgSFVJ5SyiDX0n3OqdRwK
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -1,29 +1,20 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIE5DCCAswCCQDXCA89wY62zzANBgkqhkiG9w0BAQsFADA0MQswCQYDVQQGEwJV
|
||||
UzELMAkGA1UECAwCQUwxGDAWBgNVBAMMD3d3dy5zbml0ZXN0LmNvbTAeFw0xODA2
|
||||
MTkyMDIyMTRaFw0xODA3MTkyMDIyMTRaMDQxCzAJBgNVBAYTAlVTMQswCQYDVQQI
|
||||
DAJBTDEYMBYGA1UEAwwPd3d3LnNuaXRlc3QuY29tMIICIjANBgkqhkiG9w0BAQEF
|
||||
AAOCAg8AMIICCgKCAgEA+vPeeTESpGmzGHvyR4kCdGlmJjA9x230ghFU2tdCMl1C
|
||||
aAR3uaZWxg9ldAiu54yvX3ViV/BMpNyQu6Knb293W5wcxidi8aHXcqACRLNtwwmn
|
||||
NMX48Su3OvnU7Dc/fi0mpQLyblxXloCyOG/gtNjzZXVwrn3weMCe/XsvxkpcAOJz
|
||||
7ZZrXCsrQ8pk5V0vMgryQ19zMc+uK3aAPQ+ePFjraWlVH2rOxtzRBGnVM864J9XR
|
||||
tL0ZOAD2gdu4CVIt4xiU24E7W8jfZ3CTePERKhSCBGnkO4roPmRiNwgnP0Wk5lrR
|
||||
kOQkhh4JF+GPMy4IDf6elCmEpnCT39+p36vRSP9sip1OctdfuVyCJMYgb1YCh4k2
|
||||
5CMR+MrkzxzrB2Spl46he5mGkVWXssr70F/gFrIeZPUweh7OBDHnS7twWfhhsElP
|
||||
QYOXpJBWjWJkUKANDqWxM+ObUA+Kjdgk5NEOvQs7yVxpGB8Z9yK+OIJ0k77QDazD
|
||||
VIWhjxjlwgpJW4KALn9xXkUKLhsn7P3hrEDkpTYnr0g22cgPjsgnAFfVVkcloeRi
|
||||
pSfFINIJUBFLGtU0GSyqPJ9aj8CpZZe798nyt6FpSq9AuA2DF0MoECjNbch6C2gi
|
||||
VUqNyuCVjUezw9VtKy3M16GYtnMSsNOY6tnkvfXeXmLrQlfsBs01a8DQBcmOK2MC
|
||||
AwEAATANBgkqhkiG9w0BAQsFAAOCAgEAkugSNyzc+Y6MBE4/Y+Bz5HrGtKIweuar
|
||||
7F70fBk9PgWpKIBJC8s+xJRgBXMFAy5HXZir1tNWvCeJhjCbBZRnpvKvDD61gBcM
|
||||
odde6BLc4r8cRT5l0rILA01cVwyr3C3TzRREThInqNLSsnf845jA9TB9YKN2P6QB
|
||||
TT4j3VMVRlR6OL9EaAUpIWHgKPfqXfbgPQ6rfPrQQGxvZbkL2g85IkpPH+DecN42
|
||||
PK53YZG6NW1+V2Z3agvc2/4qskqoVNdpe3JkafNicokDXTVd24MNtUemWzP3gq0i
|
||||
tv75zgcwLBVVOP43mVFo5e+xZgdS65ZrWyJVL2PG929gARJSEXjLHs9avRXlpXeE
|
||||
tBpCRC5gwvq2fnC7tVbKcbYyH3lr5u3nlfRlfsomoSACC6fw2cQKnp+us/+BsVyA
|
||||
ntqrGxqC/WbQ/LHtk/YJfwFSnuzEPGClKx/F7+EoDETZGAf526VkxLTKtDPmwh5N
|
||||
HFJpeczPE2IdxdaNdOnERUB5xeSDXnObTe3e8jIfpxF0rppGo5Dxw2tfhFscQGBM
|
||||
Cs6cT9gkfX71P81JjrFrbx0bWWDf8N5meNqKqcZNTI15+dDGKXfjr9YbgtI9HHYa
|
||||
Dhb+ondnii+KAcFchC7vCgDvG+bOuWxfM9N808bsBoPXvrKF+iWsOFeKmiV1B2OT
|
||||
w0ZLNJ3AW5o=
|
||||
MIIDMTCCAhmgAwIBAgIUVGf/44gRWl/1/uecYfeD9hn6HyIwDQYJKoZIhvcNAQEL
|
||||
BQAwGjEYMBYGA1UEAwwPd3d3LnNuaXRlc3QuY29tMB4XDTIxMDIyNDE0NDUxNVoX
|
||||
DTMxMDIyMjE0NDUxNVowGjEYMBYGA1UEAwwPd3d3LnNuaXRlc3QuY29tMIIBIjAN
|
||||
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqCq7NJ43TOTSef20KgVHWwh7m4Sk
|
||||
EzD3uL7V+Z1B+nzrywSDj6ApKQQMwo56zHJ36hnDyqVtvQW59sAIw4djdos9d0te
|
||||
MdGz/3EoU4o/dDVfGcjHWS2qLG6DHPI8kQppd1MBRnM3v4mmoJVIrzCEV+E0CU5W
|
||||
Y5qB8+wlK4F8Fp9wFeoAZ9n8AFGkVaAKOpkW8P87zy2CLP9OTTnfuz+ADpkZJ65k
|
||||
pYfvVoGmDmpKuIQifwDU2qungD1DuJ+2B1nRem0WIn8rMaazT3nRmji0xXNECQNj
|
||||
1Jle0GDy6yiFXa7RSGMigZ0UYjzvplD/Ck5mkB8PQ43+LBtT2N33I5utDwIDAQAB
|
||||
o28wbTAdBgNVHQ4EFgQUjXVu4thryQVm26x2uqcusSynRo0wHwYDVR0jBBgwFoAU
|
||||
jXVu4thryQVm26x2uqcusSynRo0wDwYDVR0TAQH/BAUwAwEB/zAaBgNVHREEEzAR
|
||||
gg93d3cuc25pdGVzdC5jb20wDQYJKoZIhvcNAQELBQADggEBAG0IWrWVYRgqOf52
|
||||
jyyJF/4n59COFAb7ftbpctOlv5sZLVAMOQ5utBSi1cpo5IppKphTt05rcII82e4z
|
||||
SoRKi2t+TqXgNTvSG/70UHhdZ5ebgPU62EpDUkn8rRhROdg/+pszFjbYLzk+Qhjv
|
||||
8e0fh5ZzIYqivbjO7qHLgmDJrmh92mAu5R9t3SULtJ8nCg3R5Q+QFujVgWOjYup5
|
||||
QPF9lVjAucc74AZl8mCFQrkVCtkCezSNwlYZLwwmIIFH8AHSIRXiu9AqW5OS5I8w
|
||||
KIl+HElibrvEJuy4iMVXDSFUZquU8EWHV45zaRDl5VCYIIX1u38ljYshZ0dIiTCz
|
||||
t2uj/Vo=
|
||||
-----END CERTIFICATE-----
|
||||
|
||||
@@ -1,52 +1,28 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQD68955MRKkabMY
|
||||
e/JHiQJ0aWYmMD3HbfSCEVTa10IyXUJoBHe5plbGD2V0CK7njK9fdWJX8Eyk3JC7
|
||||
oqdvb3dbnBzGJ2LxoddyoAJEs23DCac0xfjxK7c6+dTsNz9+LSalAvJuXFeWgLI4
|
||||
b+C02PNldXCuffB4wJ79ey/GSlwA4nPtlmtcKytDymTlXS8yCvJDX3Mxz64rdoA9
|
||||
D548WOtpaVUfas7G3NEEadUzzrgn1dG0vRk4APaB27gJUi3jGJTbgTtbyN9ncJN4
|
||||
8REqFIIEaeQ7iug+ZGI3CCc/RaTmWtGQ5CSGHgkX4Y8zLggN/p6UKYSmcJPf36nf
|
||||
q9FI/2yKnU5y11+5XIIkxiBvVgKHiTbkIxH4yuTPHOsHZKmXjqF7mYaRVZeyyvvQ
|
||||
X+AWsh5k9TB6Hs4EMedLu3BZ+GGwSU9Bg5ekkFaNYmRQoA0OpbEz45tQD4qN2CTk
|
||||
0Q69CzvJXGkYHxn3Ir44gnSTvtANrMNUhaGPGOXCCklbgoAuf3FeRQouGyfs/eGs
|
||||
QOSlNievSDbZyA+OyCcAV9VWRyWh5GKlJ8Ug0glQEUsa1TQZLKo8n1qPwKlll7v3
|
||||
yfK3oWlKr0C4DYMXQygQKM1tyHoLaCJVSo3K4JWNR7PD1W0rLczXoZi2cxKw05jq
|
||||
2eS99d5eYutCV+wGzTVrwNAFyY4rYwIDAQABAoICAEJ7bMrKd1fbMLkhzPOqll3k
|
||||
tk0Tpqo4tPfoQ4SeVkklb7xCwr0KFh7uYUA2NK/fE27EmEMXxBZA4I707kqVSxeX
|
||||
6f+M26eL6pnRTgiJSGDNI+DVObgajrYvDXtuv4Fb0MsSVstp50JV4eEVsn/2obSV
|
||||
Qj7X2mcDEJuykNuFQ45wb6nXmaWXQiT5b3VcFG67e6bhmJDvpgKZqCuFAbSXEfah
|
||||
Ew35q8H/KdhzeSn6b8sN2Dp7hjzR9Hw+iyjc/o8VKgpk2CbetmCe8FKv+o4dVLx6
|
||||
mR41FIXC7koJ/OvENYVZNf+ekRZ+yoXrGZbDcRrUA4rY3O2DEYnTpRs+V3lxQX2J
|
||||
FX/UPt/2Z5Mwaj4DX8llslO2qNvgV0WnFzm7HjXulfYVaYqrz0npWZyLWVETIov+
|
||||
56V45dAXOGpTeORmgRMaasNHOTFjwyf4ffi+DAr7xf944rZLL4eIF0fjD9yEOrn9
|
||||
3hKADWa5MYP1bBf+pTY5PTYFaoavBQ0vATNCyqI3QvuETIF0MeGY/Ui8u3fnI+PT
|
||||
IcvWKx86z7TMMvhhq5Ym5uK9W6HrLEs8CdusJ7vFX7VXjS8FQ3LEycFbmT+D/Xvt
|
||||
cfMDQwjM1FZCiy07G+wZxe/cSvx529QXy0yDsorpkwduAj6IjiCTYzG+GjjX2NKB
|
||||
JdPuOcp24BeiJasyQEVBAoIBAQD/4x0j2rQxFL+CpEP165GtLKxGgSA84jFZ1Scd
|
||||
aYHkIelveGzPZEFpTGND6HURls+anXFYsjELK36nWpmVSK9R5LUeVAVqG/5rYe0G
|
||||
XcZ1XkUqEqBdq4cgl+1aumO7q97iJbYBDhjygwh4y/iQyGSnUQXATCoU1kDvKWIj
|
||||
GfAMbItqiI29F8DDjWCB9mIHRNickXtyA7XeBZU+7Jr+pIbVv95TaNr0FoeLJCyy
|
||||
YYA9kYQHtftkHGELU7yL8o3atz/YrRKWmFmTBNUsRu2/8PxC3B3fA+o2zu2VWEdo
|
||||
sAtinLtFZiyej8Sc7JV2WlO+k7URdqRGWSA7H0GbYWVSXaZzAoIBAQD7EDK68M1M
|
||||
G9VD/qeuF8ZUa7/S8Zf77kxG1RrH4p3HVz+pTwxaLGYi97yKUd0SVf6cT+kBZLz9
|
||||
Q31mIwYUg/BuJMfCLeD0y8UGULmXjMOL/jC1qwY/oXh+asnWLLiPp4iuV1AY6qau
|
||||
FvUS8nT60Wp3jOsWIJO79lEvM6PLL44hyVxnb++vMvlBv2gOQUQ8Xa2qLVTIL83b
|
||||
XzR72bZ3inTgJRFCBvC/c/Evdzwi1Nb2xYUkzWKEcYhsQXNIOYETraZLTHshA0aF
|
||||
r2iI9q6m/vh49yj3e/J2Znz2oo04HRMchXf4JMnIDplEJ0JoaDFjacSRLYgJ/8Q0
|
||||
kQppaomVMDtRAoIBAQC6sTIGgb9r+85J+50V5DwR1AERI46ovQLynsB+BgddsZxF
|
||||
1t/UZDoRIElgN06KebSYAvy6kK+VjbNHWKOrNi+rmSjHqteUdj4mjHjJZ0uvQAtI
|
||||
SfS0wrvA/PeQdWLkft4LsyXaGTX8YbuhnneI8pv1Mvj2NtuQ/ky78T6Hi5oHBn6l
|
||||
SGHZL2ZVhmV+DIuy7/j2KnKdWbWr+fjMwwXGebViaC1GP79XzMQxsT/nGZnd0bg5
|
||||
g/2ZKddn0z1CAcKba41qgcOJGjhoOmNpfYpiuujhwwUMPCf6uvi+OH1JFQAJf35m
|
||||
gMhXG1+AemAFzJtC9TNrPVtXdBk+6WwNeH7bHDafAoIBAQC6sXrn5HTlabUXEODj
|
||||
5q4GzPEiDaF1J+j0qzd0+CFXwJuIbU3EKEvzKMG9Ic8A+Y2R8yJTdPPMaUlwkA7P
|
||||
ZqV9YkBhNviXUIe8gH7iITywd18FWJ4W5x3Q89wPNcYwnOZYrnjTbnpv7oZjhoRS
|
||||
lzNSnymZlLQHC82nCgF88GoC2deq22QipgcQSyM3pnT1ZrvjVj47dsDfplZC2syC
|
||||
7CSpISdKMBsKY08wervvMtJ/QrYVfd0Km9pUlf8B8DD5zyFf0QmmrObeNmfHoZiS
|
||||
efuPCEwgbL0KKoA2bv4Qgh5aES37CnA6IhD6yy7osMI5KMeRJYiJ1vWyGUDizuRs
|
||||
WidhAoIBAQDXoWKEK5UigmP2QCmY/8aDan3AvZhuZ7iVgZESPXHlDYzzTKmXf0Vi
|
||||
y3KL9ox1uEWOnm+j4mmhwrLObIASR7G8soOKe+zT8HfxHBW//XHYJFrufZfAGT6b
|
||||
SusgLPaFl1LoaKDLKW4qfrai0hrW1QyfJCYZi3nK7SqxYrG/KKJgtxo0TDSr/0KR
|
||||
blAUDTF9tmRoajZqcS9uFys8fxXJfNcqfKlOEjeVEC2hzK3Gqi905OAHSO7lWALs
|
||||
L3R4pskqRFnlLEhy0VcDMV5t/vCxqqBiKwSREorEwnCkEupQDJ+FybCOZbbLx8ed
|
||||
3zJ/pivaO6YjG22SZ5fXH5BkKWnnwGa9
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCoKrs0njdM5NJ5
|
||||
/bQqBUdbCHubhKQTMPe4vtX5nUH6fOvLBIOPoCkpBAzCjnrMcnfqGcPKpW29Bbn2
|
||||
wAjDh2N2iz13S14x0bP/cShTij90NV8ZyMdZLaosboMc8jyRCml3UwFGcze/iaag
|
||||
lUivMIRX4TQJTlZjmoHz7CUrgXwWn3AV6gBn2fwAUaRVoAo6mRbw/zvPLYIs/05N
|
||||
Od+7P4AOmRknrmSlh+9WgaYOakq4hCJ/ANTaq6eAPUO4n7YHWdF6bRYifysxprNP
|
||||
edGaOLTFc0QJA2PUmV7QYPLrKIVdrtFIYyKBnRRiPO+mUP8KTmaQHw9Djf4sG1PY
|
||||
3fcjm60PAgMBAAECggEAETxX9ZultVbaj7BGD+x8lML40Ea2/lSNfzzn9257va2i
|
||||
CSvMRCM5NL/6gtJRmbS9v5umapWr+qykk7q46zlsI1FAGoL6A4cnuK2jYXSQ7lsu
|
||||
BRmD/QrmHymwfH4DYSY9CwLR5aOXeiUkk97ZFUOWvN8TE0UZK5pBaXuxj5T03+iK
|
||||
zp5t1Awozaa2mrB9dJKmZadDrTEbbTEYpFp0mmF8rGUTH+sOhbIJMmztb62cC9SC
|
||||
baL+9K0qSOkdkHxctueR3X/DJE808QWjOiznMh04Ulj6aX+SZxSh2BoMOuIPLgnf
|
||||
HTExNXN9SKlyeg7lQyc5SpkzWrQ4SbtDo55cM0uxgQKBgQDgK+pp6xDOsbmvda5g
|
||||
fna3J9UiMOIqV4j2YsgeWYkG0zGFO8PW4KzmmZ6uI+TOpwZA+kEb5gYCtwq3Icfz
|
||||
xmf/+mj86gdXGBslc0DZRcTBrUO0O/QSYghmgnB1I/p3l1fW7Mi9Rp8vbu6iUJ5G
|
||||
2lsoDj5WRi7sO29sHgK4T3RrTwKBgQDACy9m1DdAN8hjMcR01SuFsihVlvJnVYmi
|
||||
cjwOkLOmtFgpIZwGmCDvpHOvmBU3aVG8slCkC2U8R3x4BhhjuaKhP1DLRz4KcGtD
|
||||
eOWke3bKmP5n9J73jdCyTWjzle2HAZByByJ6oAAw8MM5pkqVp97zld41gY+WWAyL
|
||||
MqSs8rkyQQKBgQC3BqQJfobme2zjuO16k0ZSHA+ELu05LOcCY3yIawnZrK0N3tHy
|
||||
qtYAIQTRoTS7FNuiSbDHGnNl3BS/mLEsc52N/eGiPWFo1qGzXfpgavpONyzEBv8r
|
||||
0yaQq4ct3QYuhj6NmB39i5+Elsn3dGMF7lnmxlpXDk5ZSd2bJJWL7jNbOQKBgFl8
|
||||
XMgjluM7mwptxEdUlYUk8RqAzc9M0md5p3Z6AtxpKLNxz8xa0u4XyThHO4uobcfp
|
||||
px3pmXCvNtaOsIq3k4pjgHhYaBZhL/ouuYMdbMsW42T5XaGo2VhcLc1jTV0dY3te
|
||||
y7gsGiqz5aRVUmfN41vJt9Ni3c+Gk8HBBW75Y0aBAoGBALj3ROmCTxJJEtS/nuFD
|
||||
xe8BzzBpVXNLOwLZyX/a9nTsb9t/4yz4r2RID9zo3n9N4RAULLKWPpMtBTqo+k/R
|
||||
c5s/e+n7ko/SvAjvmFwIGYlSyho+cvVYu5Mw4UAfTOfOOzxReOUBMSU7E0s8j3nj
|
||||
W2PXy5C1DKBkfxEGRMDo0Vhb
|
||||
-----END PRIVATE KEY-----
|
||||
|
||||
@@ -66,7 +66,7 @@ func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) {
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:snitest.org"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains("Host:snitest.org"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
backend1 := startTestServer("9010", http.StatusNoContent)
|
||||
@@ -74,9 +74,9 @@ func (s *HTTPSSuite) TestWithSNIConfigRoute(c *check.C) {
|
||||
defer backend1.Close()
|
||||
defer backend2.Close()
|
||||
|
||||
err = try.GetRequest(backend1.URL, 1*time.Second, try.StatusCodeIs(http.StatusNoContent))
|
||||
err = try.GetRequest(backend1.URL, 5*time.Second, try.StatusCodeIs(http.StatusNoContent))
|
||||
c.Assert(err, checker.IsNil)
|
||||
err = try.GetRequest(backend2.URL, 1*time.Second, try.StatusCodeIs(http.StatusResetContent))
|
||||
err = try.GetRequest(backend2.URL, 5*time.Second, try.StatusCodeIs(http.StatusResetContent))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
tr1 := &http.Transport{
|
||||
@@ -497,10 +497,10 @@ func (s *HTTPSSuite) TestWithRootCAsContentForHTTPSOnBackend(c *check.C) {
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains(backend.URL))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains(backend.URL))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8081/ping", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
err = try.GetRequest("http://127.0.0.1:8081/ping", 5*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
@@ -519,10 +519,10 @@ func (s *HTTPSSuite) TestWithRootCAsFileForHTTPSOnBackend(c *check.C) {
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains(backend.URL))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains(backend.URL))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8081/ping", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
err = try.GetRequest("http://127.0.0.1:8081/ping", 5*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
@@ -577,7 +577,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithNoChange(c *check.C) {
|
||||
}
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr1.TLSClientConfig.ServerName))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains("Host:"+tr1.TLSClientConfig.ServerName))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
backend1 := startTestServer("9010", http.StatusNoContent)
|
||||
@@ -646,7 +646,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithChange(c *check.C) {
|
||||
}
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
backend1 := startTestServer("9010", http.StatusNoContent)
|
||||
@@ -709,7 +709,7 @@ func (s *HTTPSSuite) TestWithSNIDynamicConfigRouteWithTlsConfigurationDeletion(c
|
||||
}
|
||||
|
||||
// wait for Traefik
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 5*time.Second, try.BodyContains("Host:"+tr2.TLSClientConfig.ServerName))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
backend2 := startTestServer("9020", http.StatusResetContent)
|
||||
|
||||
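Note: the HTTPSSuite hunks above only widen the polling window given to Traefik from one second to five before the assertions fail, which makes the SNI integration tests less flaky on slow CI runners. The try helpers keep re-issuing the request until the condition matches or the deadline passes; a minimal sketch of that pattern (illustrative only, the real integration/try package offers richer matchers and options):

package trysketch

import (
	"fmt"
	"net/http"
	"time"
)

// GetUntil polls url until ok reports success or the timeout elapses.
func GetUntil(url string, timeout time.Duration, ok func(*http.Response) bool) error {
	deadline := time.Now().Add(timeout)
	for {
		resp, err := http.Get(url)
		if err == nil {
			matched := ok(resp)
			resp.Body.Close()
			if matched {
				return nil
			}
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("%s: condition not met within %s", url, timeout)
		}
		time.Sleep(500 * time.Millisecond)
	}
}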
@@ -1,6 +1,7 @@
package metrics

import (
"context"
"time"

kitlog "github.com/go-kit/kit/log"
@@ -72,7 +73,7 @@ func initDatadogClient(config *types.Datadog) *time.Ticker {
report := time.NewTicker(pushInterval)

safe.Go(func() {
datadogClient.SendLoop(report.C, "udp", address)
datadogClient.SendLoop(context.Background(), report.C, "udp", address)
})

return report

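Note: the datadog hunk tracks the newer go-kit dogstatsd API, whose SendLoop now takes a context as its first argument; passing context.Background() preserves the previous run-until-exit behaviour. If stopping the send loop on shutdown were ever wanted, a cancellable context could be threaded in instead. A sketch only, against the surrounding initDatadogClient code (datadogClient, pushInterval, address and safe.Go come from the file above; the cancel wiring is an illustrative assumption, not part of this change):

ctx, cancel := context.WithCancel(context.Background())
report := time.NewTicker(pushInterval)

safe.Go(func() {
	// Returns once ctx is cancelled.
	datadogClient.SendLoop(ctx, report.C, "udp", address)
})

// Later, from a shutdown hook:
cancel()
report.Stop()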
@@ -2,6 +2,7 @@ package metrics

import (
"bytes"
"context"
"fmt"
"net/url"
"regexp"
@@ -9,7 +10,7 @@ import (

kitlog "github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics/influx"
influxdb "github.com/influxdata/influxdb/client/v2"
influxdb "github.com/influxdata/influxdb1-client/v2"
"github.com/traefik/traefik/log"
"github.com/traefik/traefik/safe"
"github.com/traefik/traefik/types"
@@ -118,7 +119,7 @@ func initInfluxDBTicker(config *types.InfluxDB) *time.Ticker {

safe.Go(func() {
var buf bytes.Buffer
influxDBClient.WriteLoop(report.C, &influxDBWriter{buf: buf, config: config})
influxDBClient.WriteLoop(context.Background(), report.C, &influxDBWriter{buf: buf, config: config})
})

return report

@@ -1,6 +1,7 @@
package metrics

import (
"errors"
"net/http"
"sort"
"strings"
@@ -57,12 +58,16 @@ const (
|
||||
// doesn't exist anymore.
|
||||
var promState = newPrometheusState()
|
||||
|
||||
var promRegistry = stdprometheus.NewRegistry()
|
||||
|
||||
// PrometheusHandler exposes Prometheus routes.
|
||||
type PrometheusHandler struct{}
|
||||
|
||||
// AddRoutes adds Prometheus routes on a router.
|
||||
func (h PrometheusHandler) AddRoutes(router *mux.Router) {
|
||||
router.Methods(http.MethodGet).Path("/metrics").Handler(promhttp.Handler())
|
||||
router.Methods(http.MethodGet).Path("/metrics").Handler(
|
||||
promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}),
|
||||
)
|
||||
}
|
||||
|
||||
// RegisterPrometheus registers all Prometheus metrics.
|
||||
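Note: promhttp.Handler() serves the package-level default registry, so switching to promhttp.HandlerFor(promRegistry, ...) isolates Traefik's metrics on a dedicated registry; the process and Go runtime collectors then have to be registered on it explicitly, which the following hunk does. A minimal, self-contained sketch of that dedicated-registry pattern (independent of Traefik's code, port and metric set are arbitrary):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Dedicated registry instead of the package-level default one.
	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}),
		prometheus.NewGoCollector(),
	)

	// Serve only what was registered on reg.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}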
@@ -70,6 +75,20 @@ func (h PrometheusHandler) AddRoutes(router *mux.Router) {
|
||||
func RegisterPrometheus(config *types.Prometheus) Registry {
|
||||
standardRegistry := initStandardRegistry(config)
|
||||
|
||||
if err := promRegistry.Register(stdprometheus.NewProcessCollector(stdprometheus.ProcessCollectorOpts{})); err != nil {
|
||||
var arErr stdprometheus.AlreadyRegisteredError
|
||||
if !errors.As(err, &arErr) {
|
||||
log.Warn("ProcessCollector is already registered")
|
||||
}
|
||||
}
|
||||
|
||||
if err := promRegistry.Register(stdprometheus.NewGoCollector()); err != nil {
|
||||
var arErr stdprometheus.AlreadyRegisteredError
|
||||
if !errors.As(err, &arErr) {
|
||||
log.Warn("GoCollector is already registered")
|
||||
}
|
||||
}
|
||||
|
||||
if !registerPromState() {
|
||||
return nil
|
||||
}
|
||||
@@ -173,14 +192,19 @@ func initStandardRegistry(config *types.Prometheus) Registry {
|
||||
}
|
||||
|
||||
func registerPromState() bool {
if err := stdprometheus.Register(promState); err != nil {
if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
log.Errorf("Unable to register Traefik to Prometheus: %v", err)
return false
}
log.Debug("Prometheus collector already registered.")
err := promRegistry.Register(promState)
if err == nil {
return true
}
return true

var arErr stdprometheus.AlreadyRegisteredError
if errors.As(err, &arErr) {
log.Debug("Prometheus collector already registered.")
return true
}

log.Errorf("Unable to register Traefik to Prometheus: %v", err)
return false
}

// OnConfigurationUpdate receives the current configuration from Traefik.
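Note: the rewritten registerPromState registers promState on the dedicated registry and uses errors.As to treat "already registered" as success. prometheus.Register returns an AlreadyRegisteredError when a collector with identical descriptors is already present, and that error also exposes the first-registered collector. A sketch of the idiom (fragment only, not Traefik code; assumes reg is a *prometheus.Registry, c a prometheus.Counter, and the usual errors / client_golang imports):

if err := reg.Register(c); err != nil {
	var are prometheus.AlreadyRegisteredError
	if !errors.As(err, &are) {
		return err
	}
	// Reuse whatever was registered first.
	c = are.ExistingCollector.(prometheus.Counter)
}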
@@ -224,16 +248,6 @@ type prometheusState struct {
|
||||
state map[string]*collector
|
||||
}
|
||||
|
||||
// reset is a utility method for unit testing. It should be called after each
|
||||
// test run that changes promState internally in order to avoid dependencies
|
||||
// between unit tests.
|
||||
func (ps *prometheusState) reset() {
|
||||
ps.collectors = make(chan *collector)
|
||||
ps.describers = []func(ch chan<- *stdprometheus.Desc){}
|
||||
ps.dynamicConfig = newDynamicConfig()
|
||||
ps.state = make(map[string]*collector)
|
||||
}
|
||||
|
||||
func (ps *prometheusState) SetDynamicConfig(dynamicConfig *dynamicConfig) {
|
||||
ps.mtx.Lock()
|
||||
defer ps.mtx.Unlock()
|
||||
@@ -483,11 +497,15 @@ func (h *histogram) With(labelValues ...string) metrics.Histogram {
|
||||
|
||||
func (h *histogram) Observe(value float64) {
labels := h.labelNamesValues.ToLabels()
collector := h.hv.With(labels)
collector.Observe(value)
h.collectors <- newCollector(h.name, labels, collector, func() {
h.hv.Delete(labels)
})
observer := h.hv.With(labels)
observer.Observe(value)

// Do a type assertion to be sure that prometheus will be able to call the Collect method.
if collector, ok := observer.(stdprometheus.Histogram); ok {
h.collectors <- newCollector(h.name, labels, collector, func() {
h.hv.Delete(labels)
})
}
}

func (h *histogram) Describe(ch chan<- *stdprometheus.Desc) {

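Note: in recent client_golang versions HistogramVec.With returns a prometheus.Observer, which only exposes Observe; the concrete value still implements prometheus.Histogram (and therefore Collector), so the assertion above recovers it before handing it to the collector tracking. A small self-contained sketch of that behaviour (demo metric name is arbitrary):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	hv := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "demo_duration_seconds", Help: "demo"},
		[]string{"code"},
	)

	// With returns a prometheus.Observer, which only knows Observe.
	var obs prometheus.Observer = hv.With(prometheus.Labels{"code": "200"})
	obs.Observe(0.42)

	// The concrete value is still a Histogram, hence also a Collector.
	h, ok := obs.(prometheus.Histogram)
	fmt.Println("is Histogram:", ok, h != nil)
}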
@@ -1,7 +1,6 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"testing"
|
||||
@@ -10,13 +9,17 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
th "github.com/traefik/traefik/testhelpers"
|
||||
"github.com/traefik/traefik/types"
|
||||
)
|
||||
|
||||
func TestRegisterPromState(t *testing.T) {
|
||||
// Reset state of global promState.
|
||||
defer promState.reset()
|
||||
t.Cleanup(func() {
|
||||
promRegistry.Unregister(promState)
|
||||
promState.reset()
|
||||
})
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
@@ -34,7 +37,7 @@ func TestRegisterPromState(t *testing.T) {
|
||||
{
|
||||
desc: "Register once with no promState init",
|
||||
prometheusSlice: []*types.Prometheus{{}},
|
||||
expectedNbRegistries: 0,
|
||||
expectedNbRegistries: 1,
|
||||
},
|
||||
{
|
||||
desc: "Register twice",
|
||||
@@ -45,7 +48,7 @@ func TestRegisterPromState(t *testing.T) {
|
||||
{
|
||||
desc: "Register twice with no promstate init",
|
||||
prometheusSlice: []*types.Prometheus{{}, {}},
|
||||
expectedNbRegistries: 0,
|
||||
expectedNbRegistries: 2,
|
||||
},
|
||||
{
|
||||
desc: "Register twice with unregister",
|
||||
@@ -58,41 +61,54 @@ func TestRegisterPromState(t *testing.T) {
|
||||
desc: "Register twice with unregister but no promstate init",
|
||||
prometheusSlice: []*types.Prometheus{{}, {}},
|
||||
unregisterPromState: true,
|
||||
expectedNbRegistries: 0,
|
||||
expectedNbRegistries: 2,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
actualNbRegistries := 0
|
||||
for _, prom := range test.prometheusSlice {
|
||||
if test.initPromState {
|
||||
initStandardRegistry(prom)
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
actualNbRegistries := 0
|
||||
for _, prom := range test.prometheusSlice {
|
||||
if test.initPromState {
|
||||
initStandardRegistry(prom)
|
||||
}
|
||||
|
||||
if registerPromState() {
|
||||
actualNbRegistries++
|
||||
}
|
||||
|
||||
if test.unregisterPromState {
|
||||
promRegistry.Unregister(promState)
|
||||
}
|
||||
|
||||
promState.reset()
|
||||
}
|
||||
|
||||
promReg := registerPromState()
|
||||
if promReg != false {
|
||||
actualNbRegistries++
|
||||
}
|
||||
|
||||
if test.unregisterPromState {
|
||||
prometheus.Unregister(promState)
|
||||
}
|
||||
|
||||
promState.reset()
|
||||
}
|
||||
|
||||
prometheus.Unregister(promState)
|
||||
|
||||
assert.Equal(t, test.expectedNbRegistries, actualNbRegistries)
|
||||
promRegistry.Unregister(promState)
|
||||
assert.Equal(t, test.expectedNbRegistries, actualNbRegistries)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// reset is a utility method for unit testing. It should be called after each
|
||||
// test run that changes promState internally in order to avoid dependencies
|
||||
// between unit tests.
|
||||
func (ps *prometheusState) reset() {
|
||||
ps.collectors = make(chan *collector)
|
||||
ps.describers = []func(ch chan<- *prometheus.Desc){}
|
||||
ps.dynamicConfig = newDynamicConfig()
|
||||
ps.state = make(map[string]*collector)
|
||||
}
|
||||
|
||||
func TestPrometheus(t *testing.T) {
|
||||
promState = newPrometheusState()
|
||||
promRegistry = prometheus.NewRegistry()
|
||||
// Reset state of global promState.
|
||||
defer promState.reset()
|
||||
|
||||
prometheusRegistry := RegisterPrometheus(&types.Prometheus{})
|
||||
defer prometheus.Unregister(promState)
|
||||
defer promRegistry.Unregister(promState)
|
||||
|
||||
if !prometheusRegistry.IsEnabled() {
|
||||
t.Errorf("PrometheusRegistry should return true for IsEnabled()")
|
||||
@@ -139,9 +155,9 @@ func TestPrometheus(t *testing.T) {
|
||||
|
||||
delayForTrackingCompletion()
|
||||
|
||||
metricsFamilies := mustScrape()
|
||||
metricsFamilies := mustScrape(t)
|
||||
|
||||
tests := []struct {
|
||||
testCases := []struct {
|
||||
name string
|
||||
labels map[string]string
|
||||
assert func(*dto.MetricFamily)
|
||||
@@ -237,30 +253,37 @@ func TestPrometheus(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
family := findMetricFamily(test.name, metricsFamilies)
|
||||
if family == nil {
|
||||
t.Errorf("gathered metrics do not contain %q", test.name)
|
||||
continue
|
||||
}
|
||||
for _, label := range family.Metric[0].Label {
|
||||
val, ok := test.labels[*label.Name]
|
||||
if !ok {
|
||||
t.Errorf("%q metric contains unexpected label %q", test.name, *label.Name)
|
||||
} else if val != *label.Value {
|
||||
t.Errorf("label %q in metric %q has wrong value %q, expected %q", *label.Name, test.name, *label.Value, val)
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
family := findMetricFamily(test.name, metricsFamilies)
|
||||
if family == nil {
|
||||
t.Errorf("gathered metrics do not contain %q", test.name)
|
||||
return
|
||||
}
|
||||
}
|
||||
test.assert(family)
|
||||
|
||||
for _, label := range family.Metric[0].Label {
|
||||
val, ok := test.labels[*label.Name]
|
||||
if !ok {
|
||||
t.Errorf("%q metric contains unexpected label %q", test.name, *label.Name)
|
||||
} else if val != *label.Value {
|
||||
t.Errorf("label %q in metric %q has wrong value %q, expected %q", *label.Name, test.name, *label.Value, val)
|
||||
}
|
||||
}
|
||||
|
||||
test.assert(family)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrometheusMetricRemoval(t *testing.T) {
|
||||
promState = newPrometheusState()
|
||||
promRegistry = prometheus.NewRegistry()
|
||||
// Reset state of global promState.
|
||||
defer promState.reset()
|
||||
|
||||
prometheusRegistry := RegisterPrometheus(&types.Prometheus{})
|
||||
defer prometheus.Unregister(promState)
|
||||
defer promRegistry.Unregister(promState)
|
||||
|
||||
configurations := make(types.Configurations)
|
||||
configurations["providerName"] = th.BuildConfiguration(
|
||||
@@ -291,8 +314,8 @@ func TestPrometheusMetricRemoval(t *testing.T) {
|
||||
|
||||
delayForTrackingCompletion()
|
||||
|
||||
assertMetricsExist(t, mustScrape(), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
|
||||
assertMetricsAbsent(t, mustScrape(), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
|
||||
assertMetricsExist(t, mustScrape(t), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
|
||||
assertMetricsAbsent(t, mustScrape(t), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
|
||||
|
||||
// To verify that metrics belonging to active configurations are not removed
|
||||
// here the counter examples.
|
||||
@@ -303,8 +326,8 @@ func TestPrometheusMetricRemoval(t *testing.T) {
|
||||
|
||||
delayForTrackingCompletion()
|
||||
|
||||
assertMetricsExist(t, mustScrape(), entrypointReqsTotalName)
|
||||
assertMetricsExist(t, mustScrape(), entrypointReqsTotalName)
|
||||
assertMetricsExist(t, mustScrape(t), entrypointReqsTotalName)
|
||||
assertMetricsExist(t, mustScrape(t), entrypointReqsTotalName)
|
||||
}
|
||||
|
||||
func TestPrometheusRemovedMetricsReset(t *testing.T) {
|
||||
@@ -312,7 +335,7 @@ func TestPrometheusRemovedMetricsReset(t *testing.T) {
|
||||
defer promState.reset()
|
||||
|
||||
prometheusRegistry := RegisterPrometheus(&types.Prometheus{})
|
||||
defer prometheus.Unregister(promState)
|
||||
defer promRegistry.Unregister(promState)
|
||||
|
||||
labelNamesValues := []string{
|
||||
"backend", "backend",
|
||||
@@ -327,12 +350,12 @@ func TestPrometheusRemovedMetricsReset(t *testing.T) {
|
||||
|
||||
delayForTrackingCompletion()
|
||||
|
||||
metricsFamilies := mustScrape()
|
||||
metricsFamilies := mustScrape(t)
|
||||
assertCounterValue(t, 3, findMetricFamily(backendReqsTotalName, metricsFamilies), labelNamesValues...)
|
||||
|
||||
// There is no dynamic configuration and so this metric will be deleted
|
||||
// after the first scrape.
|
||||
assertMetricsAbsent(t, mustScrape(), backendReqsTotalName)
|
||||
assertMetricsAbsent(t, mustScrape(t), backendReqsTotalName)
|
||||
|
||||
prometheusRegistry.
|
||||
BackendReqsCounter().
|
||||
@@ -341,7 +364,7 @@ func TestPrometheusRemovedMetricsReset(t *testing.T) {
|
||||
|
||||
delayForTrackingCompletion()
|
||||
|
||||
metricsFamilies = mustScrape()
|
||||
metricsFamilies = mustScrape(t)
|
||||
assertCounterValue(t, 1, findMetricFamily(backendReqsTotalName, metricsFamilies), labelNamesValues...)
|
||||
}
|
||||
|
||||
@@ -355,11 +378,12 @@ func delayForTrackingCompletion() {
|
||||
time.Sleep(250 * time.Millisecond)
|
||||
}
|
||||
|
||||
func mustScrape() []*dto.MetricFamily {
|
||||
families, err := prometheus.DefaultGatherer.Gather()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("could not gather metrics families: %s", err))
|
||||
}
|
||||
func mustScrape(t *testing.T) []*dto.MetricFamily {
|
||||
t.Helper()
|
||||
|
||||
families, err := promRegistry.Gather()
|
||||
require.NoError(t, err, "could not gather metrics families")
|
||||
|
||||
return families
|
||||
}
|
||||
|
||||
@@ -445,6 +469,8 @@ func assertCounterValue(t *testing.T, want float64, family *dto.MetricFamily, la
|
||||
}
|
||||
|
||||
func buildCounterAssert(t *testing.T, metricName string, expectedValue int) func(family *dto.MetricFamily) {
|
||||
t.Helper()
|
||||
|
||||
return func(family *dto.MetricFamily) {
|
||||
if cv := int(family.Metric[0].Counter.GetValue()); cv != expectedValue {
|
||||
t.Errorf("metric %s has value %d, want %d", metricName, cv, expectedValue)
|
||||
@@ -453,6 +479,8 @@ func buildCounterAssert(t *testing.T, metricName string, expectedValue int) func
|
||||
}
|
||||
|
||||
func buildGreaterThanCounterAssert(t *testing.T, metricName string, expectedMinValue int) func(family *dto.MetricFamily) {
|
||||
t.Helper()
|
||||
|
||||
return func(family *dto.MetricFamily) {
|
||||
if cv := int(family.Metric[0].Counter.GetValue()); cv < expectedMinValue {
|
||||
t.Errorf("metric %s has value %d, want at least %d", metricName, cv, expectedMinValue)
|
||||
@@ -461,6 +489,8 @@ func buildGreaterThanCounterAssert(t *testing.T, metricName string, expectedMinV
|
||||
}
|
||||
|
||||
func buildHistogramAssert(t *testing.T, metricName string, expectedSampleCount int) func(family *dto.MetricFamily) {
|
||||
t.Helper()
|
||||
|
||||
return func(family *dto.MetricFamily) {
|
||||
if sc := int(family.Metric[0].Histogram.GetSampleCount()); sc != expectedSampleCount {
|
||||
t.Errorf("metric %s has sample count value %d, want %d", metricName, sc, expectedSampleCount)
|
||||
@@ -469,6 +499,8 @@ func buildHistogramAssert(t *testing.T, metricName string, expectedSampleCount i
|
||||
}
|
||||
|
||||
func buildGaugeAssert(t *testing.T, metricName string, expectedValue int) func(family *dto.MetricFamily) {
|
||||
t.Helper()
|
||||
|
||||
return func(family *dto.MetricFamily) {
|
||||
if gv := int(family.Metric[0].Gauge.GetValue()); gv != expectedValue {
|
||||
t.Errorf("metric %s has value %d, want %d", metricName, gv, expectedValue)
|
||||
@@ -477,6 +509,8 @@ func buildGaugeAssert(t *testing.T, metricName string, expectedValue int) func(f
|
||||
}
|
||||
|
||||
func buildTimestampAssert(t *testing.T, metricName string) func(family *dto.MetricFamily) {
|
||||
t.Helper()
|
||||
|
||||
return func(family *dto.MetricFamily) {
|
||||
if ts := time.Unix(int64(family.Metric[0].Gauge.GetValue()), 0); time.Since(ts) > time.Minute {
|
||||
t.Errorf("metric %s has wrong timestamp %v", metricName, ts)
|
||||
|
||||
@@ -1,6 +1,7 @@
package metrics

import (
    "context"
    "time"

    kitlog "github.com/go-kit/kit/log"
@@ -70,7 +71,7 @@ func initStatsdTicker(config *types.Statsd) *time.Ticker {
    report := time.NewTicker(pushInterval)

    safe.Go(func() {
        statsdClient.SendLoop(report.C, "udp", address)
        statsdClient.SendLoop(context.Background(), report.C, "udp", address)
    })

    return report
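The hunk above tracks go-kit's statsd client, whose SendLoop now takes a context as its first argument; cancelling that context is what stops the flush goroutine. A minimal, self-contained sketch of the same pattern with a cancellable context instead of context.Background() — the prefix, address, and metric name below are illustrative, not taken from Traefik:

package main

import (
    "context"
    "time"

    kitlog "github.com/go-kit/kit/log"
    "github.com/go-kit/kit/metrics/statsd"
)

func main() {
    client := statsd.New("traefik.", kitlog.NewNopLogger())

    ctx, cancel := context.WithCancel(context.Background())
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()

    go func() {
        // SendLoop flushes buffered metrics on every tick and returns once
        // ctx is cancelled, so the goroutine does not leak.
        client.SendLoop(ctx, ticker.C, "udp", "127.0.0.1:8125")
    }()

    client.NewCounter("config_reloads_total", 1).Add(1)

    time.Sleep(2 * time.Second)
    cancel() // stop the send loop
}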
@@ -2,6 +2,7 @@ package metrics

import (
    "net/http"
    "strconv"
    "testing"
    "time"

@@ -35,11 +36,11 @@ func TestStatsD(t *testing.T) {
    }

    udp.ShouldReceiveAll(t, expected, func() {
        statsdRegistry.BackendReqsCounter().With("service", "test", "code", string(http.StatusOK), "method", http.MethodGet).Add(1)
        statsdRegistry.BackendReqsCounter().With("service", "test", "code", string(http.StatusNotFound), "method", http.MethodGet).Add(1)
        statsdRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
        statsdRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
        statsdRegistry.BackendRetriesCounter().With("service", "test").Add(1)
        statsdRegistry.BackendRetriesCounter().With("service", "test").Add(1)
        statsdRegistry.BackendReqDurationHistogram().With("service", "test", "code", string(http.StatusOK)).Observe(10000)
        statsdRegistry.BackendReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
        statsdRegistry.ConfigReloadsCounter().Add(1)
        statsdRegistry.ConfigReloadsFailureCounter().Add(1)
        statsdRegistry.EntrypointReqsCounter().With("entrypoint", "test").Add(1)
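The replacement of string(http.StatusOK) with strconv.Itoa in the test above is more than cosmetic: converting an integer to a string in Go yields the character at that code point, not the decimal digits. A small stand-alone illustration (not part of the test file):

package main

import (
    "fmt"
    "net/http"
    "strconv"
)

func main() {
    // string(rune(200)) is the character U+00C8 ("È"), which is useless
    // as the value of a "code" metric label.
    fmt.Println(string(rune(http.StatusOK))) // È

    // strconv.Itoa renders the decimal text that the label actually needs.
    fmt.Println(strconv.Itoa(http.StatusOK)) // 200
}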
@@ -20,7 +20,7 @@ func TestForwardAuthFail(t *testing.T) {
|
||||
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}))
|
||||
defer server.Close()
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
middleware, err := NewAuthenticator(&types.Auth{
|
||||
Forward: &types.Forward{
|
||||
@@ -35,7 +35,7 @@ func TestForwardAuthFail(t *testing.T) {
|
||||
n := negroni.New(middleware)
|
||||
n.UseHandler(handler)
|
||||
ts := httptest.NewServer(n)
|
||||
defer ts.Close()
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
@@ -55,7 +55,7 @@ func TestForwardAuthSuccess(t *testing.T) {
|
||||
w.Header().Set("X-Auth-Secret", "secret")
|
||||
fmt.Fprintln(w, "Success")
|
||||
}))
|
||||
defer server.Close()
|
||||
t.Cleanup(server.Close)
|
||||
|
||||
middleware, err := NewAuthenticator(&types.Auth{
|
||||
Forward: &types.Forward{
|
||||
@@ -74,7 +74,7 @@ func TestForwardAuthSuccess(t *testing.T) {
|
||||
n := negroni.New(middleware)
|
||||
n.UseHandler(handler)
|
||||
ts := httptest.NewServer(n)
|
||||
defer ts.Close()
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
req.Header.Set("X-Auth-Group", "admin_group")
|
||||
@@ -91,7 +91,7 @@ func TestForwardAuthRedirect(t *testing.T) {
|
||||
authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound)
|
||||
}))
|
||||
defer authTs.Close()
|
||||
t.Cleanup(authTs.Close)
|
||||
|
||||
authMiddleware, err := NewAuthenticator(&types.Auth{
|
||||
Forward: &types.Forward{
|
||||
@@ -106,7 +106,7 @@ func TestForwardAuthRedirect(t *testing.T) {
|
||||
n := negroni.New(authMiddleware)
|
||||
n.UseHandler(handler)
|
||||
ts := httptest.NewServer(n)
|
||||
defer ts.Close()
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
client := &http.Client{
|
||||
CheckRedirect: func(r *http.Request, via []*http.Request) error {
|
||||
@@ -132,7 +132,7 @@ func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) {
|
||||
headers := w.Header()
|
||||
for _, header := range forward.HopHeaders {
|
||||
if header == forward.TransferEncoding {
|
||||
headers.Add(header, "identity")
|
||||
headers.Add(header, "chunked")
|
||||
} else {
|
||||
headers.Add(header, "test")
|
||||
}
|
||||
@@ -140,7 +140,7 @@ func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) {
|
||||
|
||||
http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound)
|
||||
}))
|
||||
defer authTs.Close()
|
||||
t.Cleanup(authTs.Close)
|
||||
|
||||
authMiddleware, err := NewAuthenticator(&types.Auth{
|
||||
Forward: &types.Forward{
|
||||
@@ -155,16 +155,17 @@ func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) {
|
||||
n := negroni.New(authMiddleware)
|
||||
n.UseHandler(handler)
|
||||
ts := httptest.NewServer(n)
|
||||
defer ts.Close()
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
client := &http.Client{
|
||||
CheckRedirect: func(r *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
},
|
||||
}
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
res, err := client.Do(req)
|
||||
assert.NoError(t, err, "there should be no error")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, http.StatusFound, res.StatusCode, "they should be equal")
|
||||
|
||||
for _, header := range forward.HopHeaders {
|
||||
@@ -172,11 +173,11 @@ func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) {
|
||||
}
|
||||
|
||||
location, err := res.Location()
|
||||
assert.NoError(t, err, "there should be no error")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal")
|
||||
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
assert.NoError(t, err, "there should be no error")
|
||||
require.NoError(t, err)
|
||||
assert.NotEmpty(t, string(body), "there should be something in the body")
|
||||
}
|
||||
|
||||
@@ -187,7 +188,7 @@ func TestForwardAuthFailResponseHeaders(t *testing.T) {
|
||||
w.Header().Add("X-Foo", "bar")
|
||||
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||
}))
|
||||
defer authTs.Close()
|
||||
t.Cleanup(authTs.Close)
|
||||
|
||||
authMiddleware, err := NewAuthenticator(&types.Auth{
|
||||
Forward: &types.Forward{
|
||||
@@ -202,7 +203,7 @@ func TestForwardAuthFailResponseHeaders(t *testing.T) {
|
||||
n := negroni.New(authMiddleware)
|
||||
n.UseHandler(handler)
|
||||
ts := httptest.NewServer(n)
|
||||
defer ts.Close()
|
||||
t.Cleanup(ts.Close)
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
client := &http.Client{}
|
||||
|
||||
@@ -1,10 +1,10 @@
package middlewares

import (
    "errors"
    "fmt"
    "net/http"

    "github.com/pkg/errors"
    "github.com/traefik/traefik/log"
    "github.com/traefik/traefik/middlewares/tracing"
    "github.com/traefik/traefik/whitelist"
@@ -6,8 +6,8 @@ import (
    "crypto/rsa"
    "crypto/x509"

    "github.com/go-acme/lego/certcrypto"
    "github.com/go-acme/lego/registration"
    "github.com/go-acme/lego/v4/certcrypto"
    "github.com/go-acme/lego/v4/registration"
    "github.com/traefik/traefik/log"
)
@@ -7,8 +7,8 @@ import (

    "github.com/cenk/backoff"
    "github.com/containous/mux"
    "github.com/go-acme/lego/challenge"
    "github.com/go-acme/lego/challenge/http01"
    "github.com/go-acme/lego/v4/challenge"
    "github.com/go-acme/lego/v4/challenge/http01"
    "github.com/traefik/traefik/log"
    "github.com/traefik/traefik/safe"
)
@@ -3,8 +3,8 @@ package acme
import (
    "crypto/tls"

    "github.com/go-acme/lego/challenge"
    "github.com/go-acme/lego/challenge/tlsalpn01"
    "github.com/go-acme/lego/v4/challenge"
    "github.com/go-acme/lego/v4/challenge/tlsalpn01"
    "github.com/traefik/traefik/log"
    "github.com/traefik/traefik/types"
)
@@ -3,6 +3,7 @@ package acme
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
fmtlog "log"
|
||||
@@ -14,14 +15,13 @@ import (
|
||||
|
||||
"github.com/cenk/backoff"
|
||||
"github.com/containous/flaeg"
|
||||
"github.com/go-acme/lego/certificate"
|
||||
"github.com/go-acme/lego/challenge"
|
||||
"github.com/go-acme/lego/challenge/dns01"
|
||||
"github.com/go-acme/lego/lego"
|
||||
legolog "github.com/go-acme/lego/log"
|
||||
"github.com/go-acme/lego/providers/dns"
|
||||
"github.com/go-acme/lego/registration"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/go-acme/lego/v4/certificate"
|
||||
"github.com/go-acme/lego/v4/challenge"
|
||||
"github.com/go-acme/lego/v4/challenge/dns01"
|
||||
"github.com/go-acme/lego/v4/lego"
|
||||
legolog "github.com/go-acme/lego/v4/log"
|
||||
"github.com/go-acme/lego/v4/providers/dns"
|
||||
"github.com/go-acme/lego/v4/registration"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/traefik/traefik/log"
|
||||
"github.com/traefik/traefik/rules"
|
||||
@@ -38,18 +38,19 @@ var (
|
||||
|
||||
// Configuration holds ACME configuration provided by users
|
||||
type Configuration struct {
|
||||
Email string `description:"Email address used for registration"`
|
||||
ACMELogging bool `description:"Enable debug logging of ACME actions."`
|
||||
CAServer string `description:"CA server to use."`
|
||||
Storage string `description:"Storage to use."`
|
||||
EntryPoint string `description:"EntryPoint to use."`
|
||||
KeyType string `description:"KeyType used for generating certificate private key. Allow value 'EC256', 'EC384', 'RSA2048', 'RSA4096', 'RSA8192'. Default to 'RSA4096'"`
|
||||
OnHostRule bool `description:"Enable certificate generation on frontends Host rules."`
|
||||
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated
|
||||
DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"`
|
||||
HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"`
|
||||
TLSChallenge *TLSChallenge `description:"Activate TLS-ALPN-01 Challenge"`
|
||||
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. Wildcard domains only accepted with DNSChallenge"`
|
||||
Email string `description:"Email address used for registration"`
|
||||
ACMELogging bool `description:"Enable debug logging of ACME actions."`
|
||||
PreferredChain string `description:"Preferred chain to use."`
|
||||
CAServer string `description:"CA server to use."`
|
||||
Storage string `description:"Storage to use."`
|
||||
EntryPoint string `description:"EntryPoint to use."`
|
||||
KeyType string `description:"KeyType used for generating certificate private key. Allow value 'EC256', 'EC384', 'RSA2048', 'RSA4096', 'RSA8192'. Default to 'RSA4096'"`
|
||||
OnHostRule bool `description:"Enable certificate generation on frontends Host rules."`
|
||||
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated
|
||||
DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"`
|
||||
HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"`
|
||||
TLSChallenge *TLSChallenge `description:"Activate TLS-ALPN-01 Challenge"`
|
||||
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. Wildcard domains only accepted with DNSChallenge"`
|
||||
}
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
@@ -270,14 +271,18 @@ func (p *Provider) getClient() (*lego.Client, error) {
|
||||
|
||||
err = client.Challenge.SetDNS01Provider(provider,
|
||||
dns01.CondOption(len(p.DNSChallenge.Resolvers) > 0, dns01.AddRecursiveNameservers(p.DNSChallenge.Resolvers)),
|
||||
dns01.CondOption(p.DNSChallenge.DisablePropagationCheck || p.DNSChallenge.DelayBeforeCheck > 0,
|
||||
dns01.AddPreCheck(func(_, _ string) (bool, error) {
|
||||
if p.DNSChallenge.DelayBeforeCheck > 0 {
|
||||
log.Debugf("Delaying %d rather than validating DNS propagation now.", p.DNSChallenge.DelayBeforeCheck)
|
||||
time.Sleep(time.Duration(p.DNSChallenge.DelayBeforeCheck))
|
||||
}
|
||||
dns01.WrapPreCheck(func(domain, fqdn, value string, check dns01.PreCheckFunc) (bool, error) {
|
||||
if p.DNSChallenge.DelayBeforeCheck > 0 {
|
||||
log.Debugf("Delaying %d rather than validating DNS propagation now.", p.DNSChallenge.DelayBeforeCheck)
|
||||
time.Sleep(time.Duration(p.DNSChallenge.DelayBeforeCheck))
|
||||
}
|
||||
|
||||
if p.DNSChallenge.DisablePropagationCheck {
|
||||
return true, nil
|
||||
})),
|
||||
}
|
||||
|
||||
return check(fqdn, value)
|
||||
}),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -411,12 +416,13 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
|
||||
var cert *certificate.Resource
|
||||
bundle := true
|
||||
if p.useCertificateWithRetry(uncheckedDomains) {
|
||||
cert, err = obtainCertificateWithRetry(domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle)
|
||||
cert, err = obtainCertificateWithRetry(domains, client, p.DNSChallenge.preCheckTimeout, p.DNSChallenge.preCheckInterval, bundle, p.PreferredChain)
|
||||
} else {
|
||||
request := certificate.ObtainRequest{
|
||||
Domains: domains,
|
||||
Bundle: bundle,
|
||||
MustStaple: OSCPMustStaple,
|
||||
Domains: domains,
|
||||
Bundle: bundle,
|
||||
MustStaple: OSCPMustStaple,
|
||||
PreferredChain: p.PreferredChain,
|
||||
}
|
||||
cert, err = client.Certificate.Obtain(request)
|
||||
}
|
||||
@@ -487,15 +493,16 @@ func (p *Provider) useCertificateWithRetry(domains []string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func obtainCertificateWithRetry(domains []string, client *lego.Client, timeout, interval time.Duration, bundle bool) (*certificate.Resource, error) {
|
||||
func obtainCertificateWithRetry(domains []string, client *lego.Client, timeout, interval time.Duration, bundle bool, preferredChain string) (*certificate.Resource, error) {
|
||||
var cert *certificate.Resource
|
||||
var err error
|
||||
|
||||
operation := func() error {
|
||||
request := certificate.ObtainRequest{
|
||||
Domains: domains,
|
||||
Bundle: bundle,
|
||||
MustStaple: OSCPMustStaple,
|
||||
Domains: domains,
|
||||
Bundle: bundle,
|
||||
MustStaple: OSCPMustStaple,
|
||||
PreferredChain: preferredChain,
|
||||
}
|
||||
cert, err = client.Certificate.Obtain(request)
|
||||
return err
|
||||
@@ -654,7 +661,11 @@ func (p *Provider) renewCertificates() {
|
||||
Domain: cert.Domain.Main,
|
||||
PrivateKey: cert.Key,
|
||||
Certificate: cert.Certificate,
|
||||
}, true, OSCPMustStaple)
|
||||
}, true, OSCPMustStaple, p.PreferredChain)
|
||||
if err != nil {
|
||||
log.Errorf("Error renewing certificate from LE: %v, %v", cert.Domain, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Error renewing certificate from LE: %v, %v", cert.Domain, err)
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
    "crypto/tls"
    "testing"

    "github.com/go-acme/lego/certcrypto"
    "github.com/go-acme/lego/v4/certcrypto"
    "github.com/stretchr/testify/assert"
    "github.com/traefik/traefik/safe"
    traefiktls "github.com/traefik/traefik/tls"
@@ -134,7 +134,7 @@ func taskState(state swarm.TaskState) func(*swarm.TaskStatus) {

func taskContainerStatus(id string) func(*swarm.TaskStatus) {
    return func(status *swarm.TaskStatus) {
        status.ContainerStatus = swarm.ContainerStatus{
        status.ContainerStatus = &swarm.ContainerStatus{
            ContainerID: id,
        }
    }
@@ -1,6 +1,7 @@
package file

import (
    "errors"
    "fmt"
    "io/ioutil"
    "os"
@@ -9,7 +10,6 @@ import (
    "strings"
    "text/template"

    "github.com/pkg/errors"
    "github.com/traefik/traefik/log"
    "github.com/traefik/traefik/provider"
    "github.com/traefik/traefik/safe"
@@ -1,6 +1,7 @@
package kubernetes

import (
    "context"
    "errors"
    "fmt"
    "io/ioutil"
@@ -192,7 +193,7 @@ func (c *clientImpl) UpdateIngressStatus(namespace, name, ip, hostname string) e
    ingCopy := ing.DeepCopy()
    ingCopy.Status = extensionsv1beta1.IngressStatus{LoadBalancer: corev1.LoadBalancerStatus{Ingress: []corev1.LoadBalancerIngress{{IP: ip, Hostname: hostname}}}}

    _, err = c.clientset.ExtensionsV1beta1().Ingresses(ingCopy.Namespace).UpdateStatus(ingCopy)
    _, err = c.clientset.ExtensionsV1beta1().Ingresses(ingCopy.Namespace).UpdateStatus(context.Background(), ingCopy, metav1.UpdateOptions{})
    if err != nil {
        return fmt.Errorf("failed to update ingress status %s/%s: %v", namespace, name, err)
    }
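Newer client-go releases require a context and an options struct on every write call, which is why the UpdateStatus line above gains context.Background() and metav1.UpdateOptions{}. A minimal sketch of the same call with a request timeout instead of a background context; the client and Ingress are assumed to exist already, the helper name is made up, and this assumes a client-go release that still ships extensions/v1beta1 Ingress:

package kubernetes

import (
    "context"
    "fmt"
    "time"

    corev1 "k8s.io/api/core/v1"
    extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    k8sclient "k8s.io/client-go/kubernetes"
)

// updateIngressIP is illustrative only: it bounds the status update with a
// ten-second timeout so a slow API server cannot block the caller forever.
func updateIngressIP(client k8sclient.Interface, ing *extensionsv1beta1.Ingress, ip string) error {
    ingCopy := ing.DeepCopy()
    ingCopy.Status = extensionsv1beta1.IngressStatus{
        LoadBalancer: corev1.LoadBalancerStatus{
            Ingress: []corev1.LoadBalancerIngress{{IP: ip}},
        },
    }

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    _, err := client.ExtensionsV1beta1().Ingresses(ingCopy.Namespace).UpdateStatus(ctx, ingCopy, metav1.UpdateOptions{})
    if err != nil {
        return fmt.Errorf("failed to update ingress status %s/%s: %w", ingCopy.Namespace, ingCopy.Name, err)
    }
    return nil
}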
@@ -38,3 +38,22 @@ spec:
          servicePort: 80
  tls:
  - secretName: myUndefinedSecret

---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    ingress.kubernetes.io/frontend-entry-points: ep3
  name: badSecretIng
  namespace: testing
spec:
  rules:
  - host: example.fail
    http:
      paths:
      - backend:
          serviceName: example-fail
          servicePort: 80
  tls:
  - secretName: badSecret
@@ -6,3 +6,13 @@ kind: Secret
metadata:
  name: myTlsSecret
  namespace: testing

---
apiVersion: v1
data:
  tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tXG5ceDAwXHgwMFx4MDAtLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tXG4=
  tls.key: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0=
kind: Secret
metadata:
  name: badSecret
  namespace: testing
@@ -3,6 +3,7 @@ package kubernetes
import (
    "bufio"
    "bytes"
    "encoding/pem"
    "errors"
    "flag"
    "fmt"
@@ -835,6 +836,19 @@ func getCertificateBlocks(secret *corev1.Secret, namespace, secretName string) (
            namespace, secretName, strings.Join(missingEntries, ", "))
    }

    if !isPem(tlsCrtData) {
        missingEntries = append(missingEntries, "tls.crt")
    }

    if !isPem(tlsKeyData) {
        missingEntries = append(missingEntries, "tls.key")
    }

    if len(missingEntries) > 0 {
        return "", "", fmt.Errorf("secret %s/%s does not contain PEM formatted TLS data entries: %s",
            namespace, secretName, strings.Join(missingEntries, ","))
    }

    return cert, key, nil
}

@@ -1269,3 +1283,17 @@ func templateSafeString(value string) error {
    _, err := strconv.Unquote(`"` + value + `"`)
    return err
}

func isPem(data []byte) bool {
    for {
        block, rest := pem.Decode(data)
        if block == nil {
            return false
        }

        if len(rest) == 0 {
            break
        }
        data = rest
    }
    return true
}
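isPem accepts data only if it can be consumed as one PEM block after another; anything that fails to decode along the way (such as the NUL bytes hidden inside the badSecret fixture above) makes it return false. A small stand-alone usage sketch with made-up inputs:

package main

import (
    "encoding/pem"
    "fmt"
)

// isPem is copied from the hunk above: the whole input must be a chain of
// decodable PEM blocks.
func isPem(data []byte) bool {
    for {
        block, rest := pem.Decode(data)
        if block == nil {
            return false
        }
        if len(rest) == 0 {
            break
        }
        data = rest
    }
    return true
}

func main() {
    good := []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n")
    bad := []byte("clearly not a certificate")

    fmt.Println(isPem(good)) // true
    fmt.Println(isPem(bad))  // false
}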
@@ -1915,8 +1915,8 @@ func TestGetTLS(t *testing.T) {
|
||||
Namespace: "testing",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": []byte("tls-crt"),
|
||||
"tls.key": []byte("tls-key"),
|
||||
"tls.crt": []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
"tls.key": []byte("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -1925,8 +1925,8 @@ func TestGetTLS(t *testing.T) {
|
||||
Namespace: "testing",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": []byte("tls-crt"),
|
||||
"tls.key": []byte("tls-key"),
|
||||
"tls.crt": []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
"tls.key": []byte("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1934,14 +1934,14 @@ func TestGetTLS(t *testing.T) {
|
||||
result: map[string]*tls.Configuration{
|
||||
"testing/test-secret": {
|
||||
Certificate: &tls.Certificate{
|
||||
CertFile: tls.FileOrContent("tls-crt"),
|
||||
KeyFile: tls.FileOrContent("tls-key"),
|
||||
CertFile: tls.FileOrContent("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
KeyFile: tls.FileOrContent("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
"testing/test-secret2": {
|
||||
Certificate: &tls.Certificate{
|
||||
CertFile: tls.FileOrContent("tls-crt"),
|
||||
KeyFile: tls.FileOrContent("tls-key"),
|
||||
CertFile: tls.FileOrContent("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
KeyFile: tls.FileOrContent("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1968,8 +1968,8 @@ func TestGetTLS(t *testing.T) {
|
||||
Namespace: "testing",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": []byte("tls-crt"),
|
||||
"tls.key": []byte("tls-key"),
|
||||
"tls.crt": []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
"tls.key": []byte("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -1978,12 +1978,60 @@ func TestGetTLS(t *testing.T) {
|
||||
"testing/test-secret": {
|
||||
EntryPoints: []string{"api-secure", "https"},
|
||||
Certificate: &tls.Certificate{
|
||||
CertFile: tls.FileOrContent("tls-crt"),
|
||||
KeyFile: tls.FileOrContent("tls-key"),
|
||||
CertFile: tls.FileOrContent("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n"),
|
||||
KeyFile: tls.FileOrContent("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----\n"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "load bad certificate",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesFrontendEntryPoints, "https,api-secure"),
|
||||
iRules(iRule(iHost("example.com"))),
|
||||
iTLSes(iTLS("test-secret")),
|
||||
),
|
||||
client: clientMock{
|
||||
secrets: []*corev1.Secret{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-secret",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": []byte("invalid"),
|
||||
"tls.key": []byte("invalid"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
errResult: "secret testing/test-secret does not contain PEM formatted TLS data entries: tls.crt,tls.key",
|
||||
},
|
||||
{
|
||||
desc: "load nested bad certificate",
|
||||
ingress: buildIngress(
|
||||
iNamespace("testing"),
|
||||
iAnnotation(annotationKubernetesFrontendEntryPoints, "https,api-secure"),
|
||||
iRules(iRule(iHost("example.com"))),
|
||||
iTLSes(iTLS("test-secret")),
|
||||
),
|
||||
client: clientMock{
|
||||
secrets: []*corev1.Secret{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-secret",
|
||||
Namespace: "testing",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"tls.crt": []byte("-----BEGIN CERTIFICATE-----\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\n\x00\x00\x00-----END CERTIFICATE-----\n"),
|
||||
"tls.key": []byte("-----BEGIN PRIVATE KEY-----\n-----END PRIVATE KEY-----"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
errResult: "secret testing/test-secret does not contain PEM formatted TLS data entries: tls.crt",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
|
||||
@@ -25,7 +25,7 @@ GO_BUILD_CMD="go build -ldflags"
GO_BUILD_OPT="-s -w -X ${GIT_REPO_URL}.Version=${VERSION} -X ${GIT_REPO_URL}.Codename=${CODENAME} -X ${GIT_REPO_URL}.BuildDate=${DATE}"

# Build arm binaries
OS_PLATFORM_ARG=(linux windows darwin)
OS_PLATFORM_ARG=(linux windows)
OS_ARCH_ARG=(386)
for OS in ${OS_PLATFORM_ARG[@]}; do
    BIN_EXT=''
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
set -e

if [ -n "$TRAVIS_TAG" ]; then
if [ -n "$SEMAPHORE_GIT_TAG_NAME" ]; then
    echo "Deploying..."
else
    echo "Skipping deploy"
@@ -12,11 +12,9 @@ git config --global user.email "$TRAEFIKER_EMAIL"
git config --global user.name "Traefiker"

# load ssh key
echo "Loading key..."
openssl aes-256-cbc -K $encrypted_f9e835a425bc_key -iv $encrypted_f9e835a425bc_iv -in .travis/traefiker_rsa.enc -out ~/.ssh/traefiker_rsa -d
eval "$(ssh-agent -s)"
chmod 600 ~/.ssh/traefiker_rsa
ssh-add ~/.ssh/traefiker_rsa
chmod 600 /home/semaphore/.ssh/traefiker_rsa
ssh-add /home/semaphore/.ssh/traefiker_rsa

# update traefik-library-image repo (official Docker image)
echo "Updating traefik-library-imag repo..."
@@ -31,4 +29,4 @@ git push -q --follow-tags -u origin master > /dev/null 2>&1
cd ..
rm -Rf traefik-library-image/

echo "Deployed"
echo "Deployed"
@@ -1,16 +1,37 @@
FROM alpine:3.8
FROM alpine:3.14 as alpine

RUN apk --no-cache --no-progress add \
    ca-certificates \
    curl \
    findutils \
    libcurl \
    ruby \
    ruby-bigdecimal \
    ruby-etc \
    ruby-ffi \
    ruby-json \
    ruby-nokogiri=1.8.3-r0 \
    tini \
    && gem install --no-document html-proofer -v 3.9.3
    ruby-nokogiri \
    ruby-dev \
    build-base

RUN gem install html-proofer --version 3.19.0 --no-document -- --use-system-libraries

# After Ruby, some NodeJS YAY!
RUN apk --no-cache --no-progress add \
    git \
    nodejs \
    npm

# To handle 'not get uid/gid'
RUN npm config set unsafe-perm true

RUN npm install --global \
    markdownlint@0.22.0 \
    markdownlint-cli@0.26.0

# Finally the shell tools we need for later
# tini helps to terminate properly all the parallelized tasks when sending CTRL-C
RUN apk --no-cache --no-progress add \
    ca-certificates \
    curl \
    tini

COPY ./validate.sh /validate.sh
@@ -1,13 +1,13 @@
#!/bin/sh

PATH_TO_SITE="${1:-/app/site}"

set -eu

PATH_TO_SITE="/app/site"
[ -d "${PATH_TO_SITE}" ]

NUMBER_OF_CPUS="$(grep -c processor /proc/cpuinfo)"

echo "=== Checking HTML content..."

# Search for all HTML files except the theme's partials
@@ -19,10 +19,12 @@ find "${PATH_TO_SITE}" -type f -not -path "/app/site/theme/*" \
  htmlproofer \
  --check-html \
  --check_external_hash \
  --empty_alt_ignore \
  --alt_ignore="/traefikproxy-vertical-logo-color.svg/" \
  --http_status_ignore="0,500,501,503" \
  --url-ignore "/https://groups.google.com/a/traefik.io/forum/#!forum/security/,/localhost:/,/127.0.0.1:/,/fonts.gstatic.com/,/.minikube/,/doc.traefik.io\/traefik/,/traefik.io/,/github.com\/traefik\/traefik\/*edit*/,/github.com\/traefik\/traefik\/$/" \
  '{}'
  --file_ignore="/404.html/" \
  --url_ignore="/https://groups.google.com/a/traefik.io/forum/#!forum/security/,/localhost:/,/127.0.0.1:/,/fonts.gstatic.com/,/.minikube/,/github.com\/traefik\/traefik\/*edit*/,/github.com\/traefik\/traefik/,/doc.traefik.io/,/github\.com\/golang\/oauth2\/blob\/36a7019397c4c86cf59eeab3bc0d188bac444277\/.+/,/www.akamai.com/,/pilot.traefik.io\/profile/,/traefik.io/,/doc.traefik.io\/traefik-mesh/,/www.mkdocs.org/,/squidfunk.github.io/,/ietf.org/,/www.namesilo.com/,/www.youtube.com/,/www.linode.com/,/www.alibabacloud.com/" \
  '{}' 1>/dev/null
## HTML-proofer options at https://github.com/gjtorikian/html-proofer#configuration

echo "= Documentation checked successfuly."
echo "= Documentation checked successfully."
@@ -4,11 +4,11 @@ RepositoryName = "traefik"
OutputType = "file"
FileName = "traefik_changelog.md"

# example new bugfix v1.7.27
# example new bugfix v1.7.34
CurrentRef = "v1.7"
PreviousRef = "v1.7.26"
PreviousRef = "v1.7.33"
BaseBranch = "v1.7"
FutureCurrentRefName = "v1.7.27"
FutureCurrentRefName = "v1.7.34"

ThresholdPreviousRef = 10
ThresholdCurrentRef = 10
@@ -1,58 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
set -o nounset
|
||||
|
||||
echo "Prune dependencies"
|
||||
|
||||
find vendor -name '*_test.go' -exec rm {} \;
|
||||
|
||||
find vendor -type f \( ! -iname 'licen[cs]e*' \
|
||||
-a ! -iname '*notice*' \
|
||||
-a ! -iname '*patent*' \
|
||||
-a ! -iname '*copying*' \
|
||||
-a ! -iname '*unlicense*' \
|
||||
-a ! -iname '*copyright*' \
|
||||
-a ! -iname '*copyleft*' \
|
||||
-a ! -iname '*legal*' \
|
||||
-a ! -iname 'disclaimer*' \
|
||||
-a ! -iname 'third-party*' \
|
||||
-a ! -iname 'thirdparty*' \
|
||||
-a ! -iname '*.go' \
|
||||
-a ! -iname '*.c' \
|
||||
-a ! -iname '*.s' \
|
||||
-a ! -iname '*.pl' \
|
||||
-a ! -iname '*.cc' \
|
||||
-a ! -iname '*.cpp' \
|
||||
-a ! -iname '*.cxx' \
|
||||
-a ! -iname '*.h' \
|
||||
-a ! -iname '*.hh' \
|
||||
-a ! -iname '*.hpp' \
|
||||
-a ! -iname '*.hxx' \
|
||||
-a ! -iname '*.s' \) -exec rm -f {} +
|
||||
|
||||
find . -type d \( -iname '*Godeps*' \) -exec rm -rf {} +
|
||||
|
||||
find vendor -type l \( ! -iname 'licen[cs]e*' \
|
||||
-a ! -iname '*notice*' \
|
||||
-a ! -iname '*patent*' \
|
||||
-a ! -iname '*copying*' \
|
||||
-a ! -iname '*unlicense*' \
|
||||
-a ! -iname '*copyright*' \
|
||||
-a ! -iname '*copyleft*' \
|
||||
-a ! -iname '*legal*' \
|
||||
-a ! -iname 'disclaimer*' \
|
||||
-a ! -iname 'third-party*' \
|
||||
-a ! -iname 'thirdparty*' \
|
||||
-a ! -iname '*.go' \
|
||||
-a ! -iname '*.c' \
|
||||
-a ! -iname '*.S' \
|
||||
-a ! -iname '*.cc' \
|
||||
-a ! -iname '*.cpp' \
|
||||
-a ! -iname '*.cxx' \
|
||||
-a ! -iname '*.h' \
|
||||
-a ! -iname '*.hh' \
|
||||
-a ! -iname '*.hpp' \
|
||||
-a ! -iname '*.hxx' \
|
||||
-a ! -iname '*.s' \) -exec rm -f {} +
|
||||
@@ -17,7 +17,6 @@ find_dirs() {
  find . -not \( \
    \( \
      -path './integration/*' \
      -o -path './vendor/*' \
      -o -path './.git/*' \
    \) \
    -prune \
@@ -3,32 +3,16 @@ set -o errexit
set -o pipefail
set -o nounset

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"; export SCRIPT_DIR
source "${SCRIPT_DIR}/.validate"
SCRIPT_DIR="$( cd "$( dirname "${0}" )" && pwd -P)"; export SCRIPT_DIR

vendor_dir="./vendor/"
IFS=$'\n' files=( $(validate_diff --diff-filter=ACMR --name-only -- ${vendor_dir} || true) )
go generate

if [[ ${#files[@]} -gt 0 ]]; then
  # We run dep install to and see if we have a diff afterwards
  echo "checking ${vendor_dir} for unintentional changes..."
echo "checking go modules for unintentional changes..."

  dep ensure -v
  (${SCRIPT_DIR}/prune-dep.sh)
go mod tidy
git diff --exit-code go.mod
git diff --exit-code go.sum

  # Let see if the working directory is clean
  diffs="$(git status --porcelain -- ${vendor_dir} 2>/dev/null)"
  if [[ "$diffs" ]]; then
    {
      echo "The result of 'dep ensure' for vendor directory '${vendor_dir}' differs"
      echo
      echo "$diffs"
      echo
      echo 'Please vendor your package(s) with dep.'
      echo
    } >&2
    exit 2
  fi
fi
echo 'Congratulations! All go modules changes are done the right way.'

echo 'Congratulations! All vendoring changes are done the right way.'
rm -rf ./autogen/genstatic/
@@ -19,7 +19,7 @@ import (

    "github.com/armon/go-proxyproto"
    "github.com/containous/mux"
    "github.com/go-acme/lego/challenge/tlsalpn01"
    "github.com/go-acme/lego/v4/challenge/tlsalpn01"
    "github.com/sirupsen/logrus"
    "github.com/traefik/traefik/cluster"
    "github.com/traefik/traefik/configuration"

@@ -135,8 +135,11 @@ func getCertificateDomains(cert *tls.Certificate) []string {
    if len(x509Cert.Subject.CommonName) > 0 {
        names = append(names, x509Cert.Subject.CommonName)
    }

    for _, san := range x509Cert.DNSNames {
        names = append(names, san)
        if san != x509Cert.Subject.CommonName {
            names = append(names, san)
        }
    }

    return names
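With the guard above, a SAN that repeats the certificate's common name is no longer appended a second time. A stand-alone illustration of the fixed logic, using a hand-built x509.Certificate with made-up names:

package main

import (
    "crypto/x509"
    "crypto/x509/pkix"
    "fmt"
)

// domainNames reproduces the fixed loop: the CommonName is added once and a
// SAN equal to the CommonName is skipped.
func domainNames(cert *x509.Certificate) []string {
    var names []string
    if len(cert.Subject.CommonName) > 0 {
        names = append(names, cert.Subject.CommonName)
    }
    for _, san := range cert.DNSNames {
        if san != cert.Subject.CommonName {
            names = append(names, san)
        }
    }
    return names
}

func main() {
    cert := &x509.Certificate{
        Subject:  pkix.Name{CommonName: "example.org"},
        DNSNames: []string{"example.org", "www.example.org"},
    }
    fmt.Println(domainNames(cert)) // [example.org www.example.org], not example.org twice
}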
202 vendor/cloud.google.com/go/LICENSE generated vendored
@@ -1,202 +0,0 @@
(vendored Apache License 2.0 text removed with the vendor directory)

437 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored
@@ -1,437 +0,0 @@
(vendored GCE metadata client removed with the vendor directory)
}
|
||||
resc <- strsContains(addrs, metadataIP)
|
||||
}()
|
||||
|
||||
tryHarder := systemInfoSuggestsGCE()
|
||||
if tryHarder {
|
||||
res := <-resc
|
||||
if res {
|
||||
// The first strategy succeeded, so let's use it.
|
||||
return true
|
||||
}
|
||||
// Wait for either the DNS or metadata server probe to
|
||||
// contradict the other one and say we are running on
|
||||
// GCE. Give it a lot of time to do so, since the system
|
||||
// info already suggests we're running on a GCE BIOS.
|
||||
timer := time.NewTimer(5 * time.Second)
|
||||
defer timer.Stop()
|
||||
select {
|
||||
case res = <-resc:
|
||||
return res
|
||||
case <-timer.C:
|
||||
// Too slow. Who knows what this system is.
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// There's no hint from the system info that we're running on
|
||||
// GCE, so use the first probe's result as truth, whether it's
|
||||
// true or false. The goal here is to optimize for speed for
|
||||
// users who are NOT running on GCE. We can't assume that
|
||||
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||
// address is fast. Worst case this should return when the
|
||||
// metaClient's Transport.ResponseHeaderTimeout or
|
||||
// Transport.Dial.Timeout fires (in two seconds).
|
||||
return <-resc
|
||||
}
|
||||
|
||||
// systemInfoSuggestsGCE reports whether the local system (without
|
||||
// doing network requests) suggests that we're running on GCE. If this
|
||||
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||
// server.
|
||||
func systemInfoSuggestsGCE() bool {
|
||||
if runtime.GOOS != "linux" {
|
||||
// We don't have any non-Linux clues available, at least yet.
|
||||
return false
|
||||
}
|
||||
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||
name := strings.TrimSpace(string(slurp))
|
||||
return name == "Google" || name == "Google Compute Engine"
|
||||
}
|
||||
|
||||
// Subscribe subscribes to a value from the metadata service.
|
||||
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
|
||||
// The suffix may contain query parameters.
|
||||
//
|
||||
// Subscribe calls fn with the latest metadata value indicated by the provided
|
||||
// suffix. If the metadata value is deleted, fn is called with the empty string
|
||||
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
|
||||
// is deleted. Subscribe returns the error value returned from the last call to
|
||||
// fn, which may be nil when ok == false.
|
||||
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||
const failedSubscribeSleep = time.Second * 5
|
||||
|
||||
// First check to see if the metadata value exists at all.
|
||||
val, lastETag, err := getETag(subscribeClient, suffix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := fn(val, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ok := true
|
||||
if strings.ContainsRune(suffix, '?') {
|
||||
suffix += "&wait_for_change=true&last_etag="
|
||||
} else {
|
||||
suffix += "?wait_for_change=true&last_etag="
|
||||
}
|
||||
for {
|
||||
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
|
||||
if err != nil {
|
||||
if _, deleted := err.(NotDefinedError); !deleted {
|
||||
time.Sleep(failedSubscribeSleep)
|
||||
continue // Retry on other errors.
|
||||
}
|
||||
ok = false
|
||||
}
|
||||
lastETag = etag
|
||||
|
||||
if err := fn(val, ok); err != nil || !ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProjectID returns the current instance's project ID string.
|
||||
func ProjectID() (string, error) { return projID.get() }
|
||||
|
||||
// NumericProjectID returns the current instance's numeric project ID.
|
||||
func NumericProjectID() (string, error) { return projNum.get() }
|
||||
|
||||
// InternalIP returns the instance's primary internal IP address.
|
||||
func InternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/ip")
|
||||
}
|
||||
|
||||
// ExternalIP returns the instance's primary external (public) IP address.
|
||||
func ExternalIP() (string, error) {
|
||||
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
|
||||
}
|
||||
|
||||
// Hostname returns the instance's hostname. This will be of the form
|
||||
// "<instanceID>.c.<projID>.internal".
|
||||
func Hostname() (string, error) {
|
||||
return getTrimmed("instance/hostname")
|
||||
}
|
||||
|
||||
// InstanceTags returns the list of user-defined instance tags,
|
||||
// assigned when initially creating a GCE instance.
|
||||
func InstanceTags() ([]string, error) {
|
||||
var s []string
|
||||
j, err := Get("instance/tags")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceID returns the current VM's numeric instance ID.
|
||||
func InstanceID() (string, error) {
|
||||
return instID.get()
|
||||
}
|
||||
|
||||
// InstanceName returns the current VM's instance ID string.
|
||||
func InstanceName() (string, error) {
|
||||
host, err := Hostname()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Split(host, ".")[0], nil
|
||||
}
|
||||
|
||||
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||
func Zone() (string, error) {
|
||||
zone, err := getTrimmed("instance/zone")
|
||||
// zone is of the form "projects/<projNum>/zones/<zoneName>".
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return zone[strings.LastIndex(zone, "/")+1:], nil
|
||||
}
|
||||
|
||||
// InstanceAttributes returns the list of user-defined attributes,
|
||||
// assigned when initially creating a GCE VM instance. The value of an
|
||||
// attribute can be obtained with InstanceAttributeValue.
|
||||
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
|
||||
|
||||
// ProjectAttributes returns the list of user-defined attributes
|
||||
// applying to the project as a whole, not just this VM. The value of
|
||||
// an attribute can be obtained with ProjectAttributeValue.
|
||||
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
|
||||
|
||||
func lines(suffix string) ([]string, error) {
|
||||
j, err := Get(suffix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := strings.Split(strings.TrimSpace(j), "\n")
|
||||
for i := range s {
|
||||
s[i] = strings.TrimSpace(s[i])
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// InstanceAttributeValue returns the value of the provided VM
|
||||
// instance attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// InstanceAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func InstanceAttributeValue(attr string) (string, error) {
|
||||
return Get("instance/attributes/" + attr)
|
||||
}
|
||||
|
||||
// ProjectAttributeValue returns the value of the provided
|
||||
// project attribute.
|
||||
//
|
||||
// If the requested attribute is not defined, the returned error will
|
||||
// be of type NotDefinedError.
|
||||
//
|
||||
// ProjectAttributeValue may return ("", nil) if the attribute was
|
||||
// defined to be the empty string.
|
||||
func ProjectAttributeValue(attr string) (string, error) {
|
||||
return Get("project/attributes/" + attr)
|
||||
}
|
||||
|
||||
// Scopes returns the service account scopes for the given account.
|
||||
// The account may be empty or the string "default" to use the instance's
|
||||
// main account.
|
||||
func Scopes(serviceAccount string) ([]string, error) {
|
||||
if serviceAccount == "" {
|
||||
serviceAccount = "default"
|
||||
}
|
||||
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
|
||||
}
|
||||
|
||||
func strsContains(ss []string, s string) bool {
|
||||
for _, v := range ss {
|
||||
if v == s {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
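As a quick orientation for readers of this diff (not part of the vendored file), the following is a minimal, illustrative sketch of how a caller might consume the package above; the log messages and variable names are assumptions, while OnGCE, ProjectID, Zone and InstanceName are the functions defined in the file.

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server and DNS once and caches the answer.
	if !metadata.OnGCE() {
		log.Println("not on GCE; the metadata service is unreachable")
		return
	}
	project, err := metadata.ProjectID() // cached after the first successful fetch
	if err != nil {
		log.Fatal(err)
	}
	zone, _ := metadata.Zone()         // e.g. "us-central1-b"
	name, _ := metadata.InstanceName() // short form of the instance hostname
	fmt.Println(project, zone, name)
}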
201
vendor/code.cloudfoundry.org/clock/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
[The full Apache License, Version 2.0 text (201 lines) followed here; it is standard boilerplate removed together with the vendored code.cloudfoundry.org/clock dependency and is available at http://www.apache.org/licenses/LICENSE-2.0.]
20
vendor/code.cloudfoundry.org/clock/NOTICE
generated
vendored
@@ -1,20 +0,0 @@
Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved.

This project contains software that is Copyright (c) 2015 Pivotal Software, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This project may include a number of subcomponents with separate
copyright notices and license terms. Your use of these subcomponents
is subject to the terms and conditions of each subcomponent's license,
as noted in the LICENSE file.
53
vendor/code.cloudfoundry.org/clock/clock.go
generated
vendored
@@ -1,53 +0,0 @@
package clock

import "time"

type Clock interface {
	Now() time.Time
	Sleep(d time.Duration)
	Since(t time.Time) time.Duration
	// After waits for the duration to elapse and then sends the current time
	// on the returned channel.
	// It is equivalent to clock.NewTimer(d).C.
	// The underlying Timer is not recovered by the garbage collector
	// until the timer fires. If efficiency is a concern, use clock.NewTimer
	// instead and call Timer.Stop if the timer is no longer needed.
	After(d time.Duration) <-chan time.Time

	NewTimer(d time.Duration) Timer
	NewTicker(d time.Duration) Ticker
}

type realClock struct{}

func NewClock() Clock {
	return &realClock{}
}

func (clock *realClock) Now() time.Time {
	return time.Now()
}

func (clock *realClock) Since(t time.Time) time.Duration {
	return time.Now().Sub(t)
}

func (clock *realClock) Sleep(d time.Duration) {
	<-clock.NewTimer(d).C()
}

func (clock *realClock) After(d time.Duration) <-chan time.Time {
	return clock.NewTimer(d).C()
}

func (clock *realClock) NewTimer(d time.Duration) Timer {
	return &realTimer{
		t: time.NewTimer(d),
	}
}

func (clock *realClock) NewTicker(d time.Duration) Ticker {
	return &realTicker{
		t: time.NewTicker(d),
	}
}
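A short, illustrative sketch (not part of the vendored file) of why code depends on this interface rather than on the time package directly: production code takes a Clock, so tests can substitute a controllable implementation. The helper name below is an assumption; only the Clock methods come from the file above.

package main

import (
	"fmt"
	"time"

	"code.cloudfoundry.org/clock"
)

// waitAtLeast accepts a Clock so that a test could inject a fake
// implementation instead of the real wall clock.
func waitAtLeast(c clock.Clock, d time.Duration) time.Duration {
	start := c.Now()
	c.Sleep(d)
	return c.Since(start)
}

func main() {
	fmt.Println(waitAtLeast(clock.NewClock(), 10*time.Millisecond))
}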
1
vendor/code.cloudfoundry.org/clock/package.go
generated
vendored
@@ -1 +0,0 @@
package clock // import "code.cloudfoundry.org/clock"
20
vendor/code.cloudfoundry.org/clock/ticker.go
generated
vendored
@@ -1,20 +0,0 @@
package clock

import "time"

type Ticker interface {
	C() <-chan time.Time
	Stop()
}

type realTicker struct {
	t *time.Ticker
}

func (t *realTicker) C() <-chan time.Time {
	return t.t.C
}

func (t *realTicker) Stop() {
	t.t.Stop()
}
25
vendor/code.cloudfoundry.org/clock/timer.go
generated
vendored
@@ -1,25 +0,0 @@
package clock

import "time"

type Timer interface {
	C() <-chan time.Time
	Reset(d time.Duration) bool
	Stop() bool
}

type realTimer struct {
	t *time.Timer
}

func (t *realTimer) C() <-chan time.Time {
	return t.t.C
}

func (t *realTimer) Reset(d time.Duration) bool {
	return t.t.Reset(d)
}

func (t *realTimer) Stop() bool {
	return t.t.Stop()
}
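For completeness, an illustrative sketch (not from the vendored code) of the Timer and Ticker wrappers in use together; the durations are arbitrary assumptions.

package main

import (
	"fmt"
	"time"

	"code.cloudfoundry.org/clock"
)

func main() {
	c := clock.NewClock()
	timeout := c.NewTimer(200 * time.Millisecond) // Timer.C() wraps time.Timer.C
	tick := c.NewTicker(50 * time.Millisecond)
	defer tick.Stop()

	select {
	case <-tick.C():
		fmt.Println("tick fired first")
	case <-timeout.C():
		fmt.Println("timed out")
	}
}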
201
vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
[The full Apache License, Version 2.0 text (201 lines) followed here as well, identical to the clock LICENSE above; it was removed together with the vendored contrib.go.opencensus.io/exporter/ocagent dependency.]
38
vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
generated
vendored
@@ -1,38 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

import (
	"math/rand"
	"time"
)

var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))

// nTriesWithExponentialBackoff retries fn up to nTries times, returning early
// if fn returns nil. Between attempts it applies exponential backoff of
// (1<<i)*timeBaseUnit plus a pseudo-random jitter of up to 100 microseconds.
func nTriesWithExponentialBackoff(nTries int64, timeBaseUnit time.Duration, fn func() error) (err error) {
	for i := int64(0); i < nTries; i++ {
		err = fn()
		if err == nil {
			return nil
		}
		// Back off for a period with a pseudo-random jitter.
		jitter := time.Duration(randSrc.Float64()*100) * time.Microsecond
		ts := jitter + ((1 << uint64(i)) * timeBaseUnit)
		<-time.After(ts)
	}
	return err
}
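An illustrative, in-package sketch of how the helper above would be invoked; the call site and constants are assumptions, while ae.connect and ae.setStateDisconnected are defined in connection.go below.

// Illustrative call site (assumed, not present in the diff): retry a dial
// up to 5 times, waiting (1<<i)*10ms plus up to 100µs of jitter between tries.
if err := nTriesWithExponentialBackoff(5, 10*time.Millisecond, ae.connect); err != nil {
	ae.setStateDisconnected()
}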
97
vendor/contrib.go.opencensus.io/exporter/ocagent/connection.go
generated
vendored
@@ -1,97 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

import (
	"math/rand"
	"sync/atomic"
	"time"
)

const (
	sDisconnected int32 = 5 + iota
	sConnected
)

func (ae *Exporter) setStateDisconnected() {
	atomic.StoreInt32(&ae.connectionState, sDisconnected)
	select {
	case ae.disconnectedCh <- true:
	default:
	}
}

func (ae *Exporter) setStateConnected() {
	atomic.StoreInt32(&ae.connectionState, sConnected)
}

func (ae *Exporter) connected() bool {
	return atomic.LoadInt32(&ae.connectionState) == sConnected
}

const defaultConnReattemptPeriod = 10 * time.Second

func (ae *Exporter) indefiniteBackgroundConnection() error {
	defer func() {
		ae.backgroundConnectionDoneCh <- true
	}()

	connReattemptPeriod := ae.reconnectionPeriod
	if connReattemptPeriod <= 0 {
		connReattemptPeriod = defaultConnReattemptPeriod
	}

	// No strong seeding required, nano time can
	// already help with pseudo uniqueness.
	rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))

	// maxJitter: 1 + (70% of the connectionReattemptPeriod)
	maxJitter := int64(1 + 0.7*float64(connReattemptPeriod))

	for {
		// These are the normal scenarios that enable
		// reconnections if we trip out:
		// 1. If we've stopped, return entirely.
		// 2. Otherwise block until we are disconnected, and
		//    then retry connecting.
		select {
		case <-ae.stopCh:
			return errStopped

		case <-ae.disconnectedCh:
			// Normal scenario that we'll wait for.
		}

		if err := ae.connect(); err == nil {
			ae.setStateConnected()
		} else {
			ae.setStateDisconnected()
		}

		// Apply some jitter to avoid lockstep retries by other
		// agent-exporters. Lockstep retries could result in an
		// unintended DDoS, by clogging the machine's resources and network.
		jitter := time.Duration(rng.Int63n(maxJitter))
		<-time.After(connReattemptPeriod + jitter)
	}
}

func (ae *Exporter) connect() error {
	cc, err := ae.dialToAgent()
	if err != nil {
		return err
	}
	return ae.enableConnectionStreams(cc)
}
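The setStateDisconnected method above relies on a non-blocking send into a channel of capacity 1 (ae.disconnectedCh is created with make(chan bool, 1) in ocagent.go). Shown in isolation below as a small runnable sketch; the names are illustrative.

package main

import "fmt"

func main() {
	disconnected := make(chan bool, 1) // capacity 1, like ae.disconnectedCh
	notify := func() {
		select {
		case disconnected <- true: // wake the background connector if it is idle
		default: // a wake-up is already pending; never block the caller
		}
	}
	notify()
	notify()                       // second call is a no-op because the buffer is full
	fmt.Println(len(disconnected)) // 1
}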
46
vendor/contrib.go.opencensus.io/exporter/ocagent/nodeinfo.go
generated
vendored
@@ -1,46 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

import (
	"os"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
	"go.opencensus.io"
)

// NodeWithStartTime creates a node using nodeName and derives:
//   Hostname from the environment
//   Pid from the current process
//   StartTimestamp from the start time of this process
//   Language and library information.
func NodeWithStartTime(nodeName string) *commonpb.Node {
	return &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName:       os.Getenv("HOSTNAME"),
			Pid:            uint32(os.Getpid()),
			StartTimestamp: timeToTimestamp(startTime),
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:           commonpb.LibraryInfo_GO_LANG,
			ExporterVersion:    Version,
			CoreLibraryVersion: opencensus.Version(),
		},
		ServiceInfo: &commonpb.ServiceInfo{
			Name: nodeName,
		},
		Attributes: make(map[string]string),
	}
}
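An illustrative sketch (not part of the diff) of the Node this produces; the service name "traefik" is an assumption.

package main

import (
	"fmt"

	"contrib.go.opencensus.io/exporter/ocagent"
)

func main() {
	node := ocagent.NodeWithStartTime("traefik") // service name is an assumption
	// Identifier carries the HOSTNAME env var, the PID and the process start time;
	// LibraryInfo records the Go language tag plus exporter and core library versions.
	fmt.Println(node.ServiceInfo.Name, node.Identifier.Pid)
}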
496
vendor/contrib.go.opencensus.io/exporter/ocagent/ocagent.go
generated
vendored
@@ -1,496 +0,0 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"google.golang.org/api/support/bundler"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
||||
"go.opencensus.io/plugin/ocgrpc"
|
||||
"go.opencensus.io/resource"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/trace"
|
||||
|
||||
commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
|
||||
agentmetricspb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1"
|
||||
agenttracepb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1"
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
resourcepb "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
|
||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||
)
|
||||
|
||||
var startupMu sync.Mutex
|
||||
var startTime time.Time
|
||||
|
||||
func init() {
|
||||
startupMu.Lock()
|
||||
startTime = time.Now()
|
||||
startupMu.Unlock()
|
||||
}
|
||||
|
||||
var _ trace.Exporter = (*Exporter)(nil)
|
||||
var _ view.Exporter = (*Exporter)(nil)
|
||||
|
||||
type Exporter struct {
|
||||
connectionState int32
|
||||
|
||||
// mu protects the non-atomic and non-channel variables
|
||||
mu sync.RWMutex
|
||||
// senderMu protects the concurrent unsafe traceExporter client
|
||||
senderMu sync.RWMutex
|
||||
started bool
|
||||
stopped bool
|
||||
agentAddress string
|
||||
serviceName string
|
||||
canDialInsecure bool
|
||||
traceExporter agenttracepb.TraceService_ExportClient
|
||||
metricsExporter agentmetricspb.MetricsService_ExportClient
|
||||
nodeInfo *commonpb.Node
|
||||
grpcClientConn *grpc.ClientConn
|
||||
reconnectionPeriod time.Duration
|
||||
resource *resourcepb.Resource
|
||||
compressor string
|
||||
headers map[string]string
|
||||
|
||||
startOnce sync.Once
|
||||
stopCh chan bool
|
||||
disconnectedCh chan bool
|
||||
|
||||
backgroundConnectionDoneCh chan bool
|
||||
|
||||
traceBundler *bundler.Bundler
|
||||
|
||||
// viewDataBundler is the bundler to enable conversion
|
||||
// from OpenCensus-Go view.Data to metricspb.Metric.
|
||||
// Please do not confuse it with metricsBundler!
|
||||
viewDataBundler *bundler.Bundler
|
||||
|
||||
clientTransportCredentials credentials.TransportCredentials
|
||||
}
|
||||
|
||||
func NewExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||
exp, err := NewUnstartedExporter(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := exp.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return exp, nil
|
||||
}
|
||||
|
||||
const spanDataBufferSize = 300
|
||||
|
||||
func NewUnstartedExporter(opts ...ExporterOption) (*Exporter, error) {
|
||||
e := new(Exporter)
|
||||
for _, opt := range opts {
|
||||
opt.withExporter(e)
|
||||
}
|
||||
traceBundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) {
|
||||
e.uploadTraces(bundle.([]*trace.SpanData))
|
||||
})
|
||||
traceBundler.DelayThreshold = 2 * time.Second
|
||||
traceBundler.BundleCountThreshold = spanDataBufferSize
|
||||
e.traceBundler = traceBundler
|
||||
|
||||
viewDataBundler := bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) {
|
||||
e.uploadViewData(bundle.([]*view.Data))
|
||||
})
|
||||
viewDataBundler.DelayThreshold = 2 * time.Second
|
||||
viewDataBundler.BundleCountThreshold = 500 // TODO: (@odeke-em) make this configurable.
|
||||
e.viewDataBundler = viewDataBundler
|
||||
e.nodeInfo = NodeWithStartTime(e.serviceName)
|
||||
e.resource = resourceProtoFromEnv()
|
||||
|
||||
return e, nil
|
||||
}
|
||||
|
||||
const (
|
||||
maxInitialConfigRetries = 10
|
||||
maxInitialTracesRetries = 10
|
||||
)
|
||||
|
||||
var (
|
||||
errAlreadyStarted = errors.New("already started")
|
||||
errNotStarted = errors.New("not started")
|
||||
errStopped = errors.New("stopped")
|
||||
errNoConnection = errors.New("no active connection")
|
||||
)
|
||||
|
||||
// Start dials to the agent, establishing a connection to it. It also
|
||||
// initiates the Config and Trace services by sending over the initial
|
||||
// messages that consist of the node identifier. Start invokes a background
|
||||
// connector that will reattempt connections to the agent periodically
|
||||
// if the connection dies.
|
||||
func (ae *Exporter) Start() error {
|
||||
var err = errAlreadyStarted
|
||||
ae.startOnce.Do(func() {
|
||||
ae.mu.Lock()
|
||||
defer ae.mu.Unlock()
|
||||
|
||||
ae.started = true
|
||||
ae.disconnectedCh = make(chan bool, 1)
|
||||
ae.stopCh = make(chan bool)
|
||||
ae.backgroundConnectionDoneCh = make(chan bool)
|
||||
|
||||
ae.setStateDisconnected()
|
||||
go ae.indefiniteBackgroundConnection()
|
||||
|
||||
err = nil
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ae *Exporter) prepareAgentAddress() string {
|
||||
if ae.agentAddress != "" {
|
||||
return ae.agentAddress
|
||||
}
|
||||
return fmt.Sprintf("%s:%d", DefaultAgentHost, DefaultAgentPort)
|
||||
}
|
||||
|
||||
func (ae *Exporter) enableConnectionStreams(cc *grpc.ClientConn) error {
|
||||
ae.mu.RLock()
|
||||
started := ae.started
|
||||
nodeInfo := ae.nodeInfo
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
// If the previous clientConn was non-nil, close it
|
||||
if ae.grpcClientConn != nil {
|
||||
_ = ae.grpcClientConn.Close()
|
||||
}
|
||||
ae.grpcClientConn = cc
|
||||
ae.mu.Unlock()
|
||||
|
||||
if err := ae.createTraceServiceConnection(ae.grpcClientConn, nodeInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ae.createMetricsServiceConnection(ae.grpcClientConn, nodeInfo)
|
||||
}
|
||||
|
||||
func (ae *Exporter) createTraceServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
// Initiate the trace service by sending over node identifier info.
|
||||
traceSvcClient := agenttracepb.NewTraceServiceClient(cc)
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
traceExporter, err := traceSvcClient.Export(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
|
||||
}
|
||||
|
||||
firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := traceExporter.Send(firstTraceMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
ae.traceExporter = traceExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// Initiate the config service by sending over node identifier info.
|
||||
configStream, err := traceSvcClient.Config(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
|
||||
}
|
||||
firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
|
||||
if err := configStream.Send(firstCfgMessage); err != nil {
|
||||
return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
|
||||
}
|
||||
|
||||
// In the background, handle trace configurations that are beamed down
|
||||
// by the agent, but also reply to it with the applied configuration.
|
||||
go ae.handleConfigStreaming(configStream)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
|
||||
metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
|
||||
metricsExporter, err := metricsSvcClient.Export(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
|
||||
}
|
||||
// Initiate the metrics service by sending over the first message just containing the Node and Resource.
|
||||
firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
|
||||
Node: node,
|
||||
Resource: ae.resource,
|
||||
}
|
||||
if err := metricsExporter.Send(firstMetricsMessage); err != nil {
|
||||
return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
|
||||
}
|
||||
|
||||
ae.mu.Lock()
|
||||
ae.metricsExporter = metricsExporter
|
||||
ae.mu.Unlock()
|
||||
|
||||
// With that we are good to go and can start sending metrics
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
|
||||
addr := ae.prepareAgentAddress()
|
||||
var dialOpts []grpc.DialOption
|
||||
if ae.clientTransportCredentials != nil {
|
||||
dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
|
||||
} else if ae.canDialInsecure {
|
||||
dialOpts = append(dialOpts, grpc.WithInsecure())
|
||||
}
|
||||
if ae.compressor != "" {
|
||||
dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
|
||||
}
|
||||
dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
|
||||
|
||||
ctx := context.Background()
|
||||
if len(ae.headers) > 0 {
|
||||
ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
|
||||
}
|
||||
return grpc.DialContext(ctx, addr, dialOpts...)
|
||||
}
|
||||
|
||||
func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
|
||||
// Note: We haven't yet implemented configuration sending so we
|
||||
// should NOT be changing connection states within this function for now.
|
||||
for {
|
||||
recv, err := configStream.Recv()
|
||||
if err != nil {
|
||||
// TODO: Check if this is a transient error or exponential backoff-able.
|
||||
return err
|
||||
}
|
||||
cfg := recv.Config
|
||||
if cfg == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise now apply the trace configuration sent down from the agent
|
||||
if psamp := cfg.GetProbabilitySampler(); psamp != nil {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
|
||||
} else if csamp := cfg.GetConstantSampler(); csamp != nil {
|
||||
alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
|
||||
if alwaysSample {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
|
||||
} else {
|
||||
trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
|
||||
}
|
||||
} else { // TODO: Add the rate limiting sampler here
|
||||
}
|
||||
|
||||
// Then finally send back to upstream the newly applied configuration
|
||||
err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop shuts down all the connections and resources
|
||||
// related to the exporter.
|
||||
func (ae *Exporter) Stop() error {
|
||||
ae.mu.RLock()
|
||||
cc := ae.grpcClientConn
|
||||
started := ae.started
|
||||
stopped := ae.stopped
|
||||
ae.mu.RUnlock()
|
||||
|
||||
if !started {
|
||||
return errNotStarted
|
||||
}
|
||||
if stopped {
|
||||
// TODO: tell the user that we've already stopped, so perhaps a sentinel error?
|
||||
return nil
|
||||
}
|
||||
|
||||
ae.Flush()
|
||||
|
||||
// Now close the underlying gRPC connection.
|
||||
var err error
|
||||
if cc != nil {
|
||||
err = cc.Close()
|
||||
}
|
||||
|
||||
// At this point we can change the state variables: started and stopped
|
||||
ae.mu.Lock()
|
||||
ae.started = false
|
||||
ae.stopped = true
|
||||
ae.mu.Unlock()
|
||||
close(ae.stopCh)
|
||||
|
||||
// Ensure that the backgroundConnector returns
|
||||
<-ae.backgroundConnectionDoneCh
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
|
||||
if sd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.traceBundler.Add(sd, 1)
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
|
||||
if batch == nil || len(batch.Spans) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return errStopped
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return errNoConnection
|
||||
}
|
||||
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(batch)
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) ExportView(vd *view.Data) {
|
||||
if vd == nil {
|
||||
return
|
||||
}
|
||||
_ = ae.viewDataBundler.Add(vd, 1)
|
||||
}
|
||||
|
||||
func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
|
||||
if len(sdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
protoSpans := make([]*tracepb.Span, 0, len(sdl))
|
||||
for _, sd := range sdl {
|
||||
if sd != nil {
|
||||
protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
|
||||
}
|
||||
}
|
||||
return protoSpans
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoSpans := ocSpanDataToPbSpans(sdl)
|
||||
if len(protoSpans) == 0 {
|
||||
return
|
||||
}
|
||||
ae.senderMu.Lock()
|
||||
err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
|
||||
Spans: protoSpans,
|
||||
})
|
||||
ae.senderMu.Unlock()
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
|
||||
if len(vdl) == 0 {
|
||||
return nil
|
||||
}
|
||||
metrics := make([]*metricspb.Metric, 0, len(vdl))
|
||||
for _, vd := range vdl {
|
||||
if vd != nil {
|
||||
vmetric, err := viewDataToMetric(vd)
|
||||
// TODO: (@odeke-em) somehow report this error, if it is non-nil.
|
||||
if err == nil && vmetric != nil {
|
||||
metrics = append(metrics, vmetric)
|
||||
}
|
||||
}
|
||||
}
|
||||
return metrics
|
||||
}
|
||||
|
||||
func (ae *Exporter) uploadViewData(vdl []*view.Data) {
|
||||
select {
|
||||
case <-ae.stopCh:
|
||||
return
|
||||
|
||||
default:
|
||||
if !ae.connected() {
|
||||
return
|
||||
}
|
||||
|
||||
protoMetrics := ocViewDataToPbMetrics(vdl)
|
||||
if len(protoMetrics) == 0 {
|
||||
return
|
||||
}
|
||||
err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
|
||||
Metrics: protoMetrics,
|
||||
// TODO:(@odeke-em)
|
||||
// a) Figure out how to derive a Node from the environment
|
||||
// b) Figure out how to derive a Resource from the environment
|
||||
// or better letting users of the exporter configure it.
|
||||
})
|
||||
if err != nil {
|
||||
ae.setStateDisconnected()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ae *Exporter) Flush() {
|
||||
ae.traceBundler.Flush()
|
||||
ae.viewDataBundler.Flush()
|
||||
}
|
||||
|
||||
func resourceProtoFromEnv() *resourcepb.Resource {
|
||||
rs, _ := resource.FromEnv(context.Background())
|
||||
if rs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
rprs := &resourcepb.Resource{
|
||||
Type: rs.Type,
|
||||
}
|
||||
if rs.Labels != nil {
|
||||
rprs.Labels = make(map[string]string)
|
||||
for k, v := range rs.Labels {
|
||||
rprs.Labels[k] = v
|
||||
}
|
||||
}
|
||||
return rprs
|
||||
}
|
||||
128
vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
generated
vendored
@@ -1,128 +0,0 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultAgentPort uint16 = 55678
|
||||
DefaultAgentHost string = "localhost"
|
||||
)
|
||||
|
||||
type ExporterOption interface {
|
||||
withExporter(e *Exporter)
|
||||
}
|
||||
|
||||
type insecureGrpcConnection int
|
||||
|
||||
var _ ExporterOption = (*insecureGrpcConnection)(nil)
|
||||
|
||||
func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
|
||||
e.canDialInsecure = true
|
||||
}
|
||||
|
||||
// WithInsecure disables client transport security for the exporter's gRPC connection
|
||||
// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
|
||||
// does. Note, by default, client security is required unless WithInsecure is used.
|
||||
func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
|
||||
|
||||
type addressSetter string
|
||||
|
||||
func (as addressSetter) withExporter(e *Exporter) {
|
||||
e.agentAddress = string(as)
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*addressSetter)(nil)
|
||||
|
||||
// WithAddress allows one to set the address that the exporter will
|
||||
// connect to the agent on. If unset, it will instead try to use
|
||||
// connect to DefaultAgentHost:DefaultAgentPort
|
||||
func WithAddress(addr string) ExporterOption {
|
||||
return addressSetter(addr)
|
||||
}
|
||||
|
||||
type serviceNameSetter string
|
||||
|
||||
func (sns serviceNameSetter) withExporter(e *Exporter) {
|
||||
e.serviceName = string(sns)
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*serviceNameSetter)(nil)
|
||||
|
||||
// WithServiceName allows one to set/override the service name
|
||||
// that the exporter will report to the agent.
|
||||
func WithServiceName(serviceName string) ExporterOption {
|
||||
return serviceNameSetter(serviceName)
|
||||
}
|
||||
|
||||
type reconnectionPeriod time.Duration
|
||||
|
||||
func (rp reconnectionPeriod) withExporter(e *Exporter) {
|
||||
e.reconnectionPeriod = time.Duration(rp)
|
||||
}
|
||||
|
||||
func WithReconnectionPeriod(rp time.Duration) ExporterOption {
|
||||
return reconnectionPeriod(rp)
|
||||
}
|
||||
|
||||
type compressorSetter string
|
||||
|
||||
func (c compressorSetter) withExporter(e *Exporter) {
|
||||
e.compressor = string(c)
|
||||
}
|
||||
|
||||
// UseCompressor will set the compressor for the gRPC client to use when sending requests.
|
||||
// It is the responsibility of the caller to ensure that the compressor set has been registered
|
||||
// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
|
||||
// compressors auto-register on import, such as gzip, which can be registered by calling
|
||||
// `import _ "google.golang.org/grpc/encoding/gzip"`
|
||||
func UseCompressor(compressorName string) ExporterOption {
|
||||
return compressorSetter(compressorName)
|
||||
}
|
||||
|
||||
type headerSetter map[string]string
|
||||
|
||||
func (h headerSetter) withExporter(e *Exporter) {
|
||||
e.headers = map[string]string(h)
|
||||
}
|
||||
|
||||
// WithHeaders will send the provided headers when the gRPC stream connection
|
||||
// is instantiated
|
||||
func WithHeaders(headers map[string]string) ExporterOption {
|
||||
return headerSetter(headers)
|
||||
}
|
||||
|
||||
type clientCredentials struct {
|
||||
credentials.TransportCredentials
|
||||
}
|
||||
|
||||
var _ ExporterOption = (*clientCredentials)(nil)
|
||||
|
||||
// WithTLSCredentials allows the connection to use TLS credentials
|
||||
// when talking to the server. It takes in grpc.TransportCredentials instead
|
||||
// of say a Certificate file or a tls.Certificate, because the retrieving
|
||||
// these credentials can be done in many ways e.g. plain file, in code tls.Config
|
||||
// or by certificate rotation, so it is up to the caller to decide what to use.
|
||||
func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
|
||||
return &clientCredentials{TransportCredentials: creds}
|
||||
}
|
||||
|
||||
func (cc *clientCredentials) withExporter(e *Exporter) {
|
||||
e.clientTransportCredentials = cc.TransportCredentials
|
||||
}
|
||||
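For orientation while reading the options file above, here is a minimal, hedged sketch of how these ExporterOption values are typically combined when constructing the ocagent exporter. It assumes the package's NewExporter constructor and OpenCensus' trace.RegisterExporter, neither of which appears in this excerpt, and the service name is a placeholder; treat it as an illustration rather than part of the vendored diff.

package main

import (
    "log"
    "time"

    "contrib.go.opencensus.io/exporter/ocagent"
    "go.opencensus.io/trace"
)

func main() {
    // Assumed constructor: NewExporter applies each ExporterOption
    // (WithAddress, WithInsecure, WithServiceName, WithReconnectionPeriod)
    // through its withExporter method, as defined in options.go above.
    exp, err := ocagent.NewExporter(
        ocagent.WithAddress("localhost:55678"), // DefaultAgentHost:DefaultAgentPort
        ocagent.WithInsecure(),                 // plaintext gRPC, no TLS credentials
        ocagent.WithServiceName("traefik"),     // placeholder service name
        ocagent.WithReconnectionPeriod(5*time.Second),
    )
    if err != nil {
        log.Fatalf("failed to create ocagent exporter: %v", err)
    }
    // Stop flushes the bundlers and closes the gRPC connection (see Stop above).
    defer func() { _ = exp.Stop() }()

    // Spans are then delivered through the exporter's ExportSpan method.
    trace.RegisterExporter(exp)
}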
248
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
generated
vendored
@@ -1,248 +0,0 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/trace"
|
||||
"go.opencensus.io/trace/tracestate"
|
||||
|
||||
tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
|
||||
const (
|
||||
maxAnnotationEventsPerSpan = 32
|
||||
maxMessageEventsPerSpan = 128
|
||||
)
|
||||
|
||||
func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
|
||||
if sd == nil {
|
||||
return nil
|
||||
}
|
||||
var namePtr *tracepb.TruncatableString
|
||||
if sd.Name != "" {
|
||||
namePtr = &tracepb.TruncatableString{Value: sd.Name}
|
||||
}
|
||||
return &tracepb.Span{
|
||||
TraceId: sd.TraceID[:],
|
||||
SpanId: sd.SpanID[:],
|
||||
ParentSpanId: sd.ParentSpanID[:],
|
||||
Status: ocStatusToProtoStatus(sd.Status),
|
||||
StartTime: timeToTimestamp(sd.StartTime),
|
||||
EndTime: timeToTimestamp(sd.EndTime),
|
||||
Links: ocLinksToProtoLinks(sd.Links),
|
||||
Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
|
||||
Name: namePtr,
|
||||
Attributes: ocAttributesToProtoAttributes(sd.Attributes),
|
||||
TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
|
||||
Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
|
||||
}
|
||||
}
|
||||
|
||||
var blankStatus trace.Status
|
||||
|
||||
func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
|
||||
if status == blankStatus {
|
||||
return nil
|
||||
}
|
||||
return &tracepb.Status{
|
||||
Code: status.Code,
|
||||
Message: status.Message,
|
||||
}
|
||||
}
|
||||
|
||||
func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
|
||||
if len(links) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sl := make([]*tracepb.Span_Link, 0, len(links))
|
||||
for _, ocLink := range links {
|
||||
// This redefinition is necessary to prevent ocLink.*ID[:] copies
|
||||
// being reused -- in short we need a new ocLink per iteration.
|
||||
ocLink := ocLink
|
||||
|
||||
sl = append(sl, &tracepb.Span_Link{
|
||||
TraceId: ocLink.TraceID[:],
|
||||
SpanId: ocLink.SpanID[:],
|
||||
Type: ocLinkTypeToProtoLinkType(ocLink.Type),
|
||||
})
|
||||
}
|
||||
|
||||
return &tracepb.Span_Links{
|
||||
Link: sl,
|
||||
}
|
||||
}
|
||||
|
||||
func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
|
||||
switch oct {
|
||||
case trace.LinkTypeChild:
|
||||
return tracepb.Span_Link_CHILD_LINKED_SPAN
|
||||
case trace.LinkTypeParent:
|
||||
return tracepb.Span_Link_PARENT_LINKED_SPAN
|
||||
default:
|
||||
return tracepb.Span_Link_TYPE_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
|
||||
if len(attrs) == 0 {
|
||||
return nil
|
||||
}
|
||||
outMap := make(map[string]*tracepb.AttributeValue)
|
||||
for k, v := range attrs {
|
||||
switch v := v.(type) {
|
||||
case bool:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
|
||||
|
||||
case int:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
|
||||
|
||||
case int64:
|
||||
outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
|
||||
|
||||
case string:
|
||||
outMap[k] = &tracepb.AttributeValue{
|
||||
Value: &tracepb.AttributeValue_StringValue{
|
||||
StringValue: &tracepb.TruncatableString{Value: v},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return &tracepb.Span_Attributes{
|
||||
AttributeMap: outMap,
|
||||
}
|
||||
}
|
||||
|
||||
// This code is mostly copied from
|
||||
// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
|
||||
func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
|
||||
if len(as) == 0 && len(es) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
timeEvents := &tracepb.Span_TimeEvents{}
|
||||
var annotations, droppedAnnotationsCount int
|
||||
var messageEvents, droppedMessageEventsCount int
|
||||
|
||||
// Transform annotations
|
||||
for i, a := range as {
|
||||
if annotations >= maxAnnotationEventsPerSpan {
|
||||
droppedAnnotationsCount = len(as) - i
|
||||
break
|
||||
}
|
||||
annotations++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(a.Time),
|
||||
Value: transformAnnotationToTimeEvent(&a),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Transform message events
|
||||
for i, e := range es {
|
||||
if messageEvents >= maxMessageEventsPerSpan {
|
||||
droppedMessageEventsCount = len(es) - i
|
||||
break
|
||||
}
|
||||
messageEvents++
|
||||
timeEvents.TimeEvent = append(timeEvents.TimeEvent,
|
||||
&tracepb.Span_TimeEvent{
|
||||
Time: timeToTimestamp(e.Time),
|
||||
Value: transformMessageEventToTimeEvent(&e),
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Process dropped counter
|
||||
timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
|
||||
timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
|
||||
|
||||
return timeEvents
|
||||
}
|
||||
|
||||
func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
|
||||
return &tracepb.Span_TimeEvent_Annotation_{
|
||||
Annotation: &tracepb.Span_TimeEvent_Annotation{
|
||||
Description: &tracepb.TruncatableString{Value: a.Message},
|
||||
Attributes: ocAttributesToProtoAttributes(a.Attributes),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
|
||||
return &tracepb.Span_TimeEvent_MessageEvent_{
|
||||
MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
|
||||
Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
|
||||
Id: uint64(e.MessageID),
|
||||
UncompressedSize: uint64(e.UncompressedByteSize),
|
||||
CompressedSize: uint64(e.CompressedByteSize),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// clip32 clips an int to the range of an int32.
|
||||
func clip32(x int) int32 {
|
||||
if x < math.MinInt32 {
|
||||
return math.MinInt32
|
||||
}
|
||||
if x > math.MaxInt32 {
|
||||
return math.MaxInt32
|
||||
}
|
||||
return int32(x)
|
||||
}
|
||||
|
||||
func timeToTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
nanoTime := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
Seconds: nanoTime / 1e9,
|
||||
Nanos: int32(nanoTime % 1e9),
|
||||
}
|
||||
}
|
||||
|
||||
func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
|
||||
switch kind {
|
||||
case trace.SpanKindClient:
|
||||
return tracepb.Span_CLIENT
|
||||
case trace.SpanKindServer:
|
||||
return tracepb.Span_SERVER
|
||||
default:
|
||||
return tracepb.Span_SPAN_KIND_UNSPECIFIED
|
||||
}
|
||||
}
|
||||
|
||||
func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
|
||||
if ts == nil {
|
||||
return nil
|
||||
}
|
||||
return &tracepb.Span_Tracestate{
|
||||
Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
|
||||
}
|
||||
}
|
||||
|
||||
func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
|
||||
protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
|
||||
for _, entry := range entries {
|
||||
protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
|
||||
Key: entry.Key,
|
||||
Value: entry.Value,
|
||||
})
|
||||
}
|
||||
return protoEntries
|
||||
}
|
||||
274
vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
generated
vendored
@@ -1,274 +0,0 @@
|
||||
// Copyright 2018, OpenCensus Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package ocagent
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/stats"
|
||||
"go.opencensus.io/stats/view"
|
||||
"go.opencensus.io/tag"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
|
||||
metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
errNilMeasure = errors.New("expecting a non-nil stats.Measure")
|
||||
errNilView = errors.New("expecting a non-nil view.View")
|
||||
errNilViewData = errors.New("expecting a non-nil view.Data")
|
||||
)
|
||||
|
||||
func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
|
||||
if vd == nil {
|
||||
return nil, errNilViewData
|
||||
}
|
||||
|
||||
descriptor, err := viewToMetricDescriptor(vd.View)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
timeseries, err := viewDataToTimeseries(vd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metric := &metricspb.Metric{
|
||||
MetricDescriptor: descriptor,
|
||||
Timeseries: timeseries,
|
||||
}
|
||||
return metric, nil
|
||||
}
|
||||
|
||||
func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
|
||||
if v == nil {
|
||||
return nil, errNilView
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return nil, errNilMeasure
|
||||
}
|
||||
|
||||
desc := &metricspb.MetricDescriptor{
|
||||
Name: stringOrCall(v.Name, v.Measure.Name),
|
||||
Description: stringOrCall(v.Description, v.Measure.Description),
|
||||
Unit: v.Measure.Unit(),
|
||||
Type: aggregationToMetricDescriptorType(v),
|
||||
LabelKeys: tagKeysToLabelKeys(v.TagKeys),
|
||||
}
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
func stringOrCall(first string, call func() string) string {
|
||||
if first != "" {
|
||||
return first
|
||||
}
|
||||
return call()
|
||||
}
|
||||
|
||||
type measureType uint
|
||||
|
||||
const (
|
||||
measureUnknown measureType = iota
|
||||
measureInt64
|
||||
measureFloat64
|
||||
)
|
||||
|
||||
func measureTypeFromMeasure(m stats.Measure) measureType {
|
||||
switch m.(type) {
|
||||
default:
|
||||
return measureUnknown
|
||||
case *stats.Float64Measure:
|
||||
return measureFloat64
|
||||
case *stats.Int64Measure:
|
||||
return measureInt64
|
||||
}
|
||||
}
|
||||
|
||||
func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
|
||||
if v == nil || v.Aggregation == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
if v.Measure == nil {
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
|
||||
switch v.Aggregation.Type {
|
||||
case view.AggTypeCount:
|
||||
// Cumulative on int64
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
|
||||
case view.AggTypeDistribution:
|
||||
// Cumulative types
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
|
||||
|
||||
case view.AggTypeLastValue:
|
||||
// Gauge types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_GAUGE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_GAUGE_INT64
|
||||
}
|
||||
|
||||
case view.AggTypeSum:
|
||||
// Cumulative types
|
||||
switch measureTypeFromMeasure(v.Measure) {
|
||||
case measureFloat64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
|
||||
case measureInt64:
|
||||
return metricspb.MetricDescriptor_CUMULATIVE_INT64
|
||||
}
|
||||
}
|
||||
|
||||
// For all other cases, return unspecified.
|
||||
return metricspb.MetricDescriptor_UNSPECIFIED
|
||||
}
|
||||
|
||||
func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
|
||||
labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
|
||||
for _, tagKey := range tagKeys {
|
||||
labelKeys = append(labelKeys, &metricspb.LabelKey{
|
||||
Key: tagKey.Name(),
|
||||
})
|
||||
}
|
||||
return labelKeys
|
||||
}
|
||||
|
||||
func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
|
||||
if vd == nil || len(vd.Rows) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Given that view.Data only contains Start, End
|
||||
// the timestamps for all the row data will be the exact same
|
||||
// per aggregation. However, the values will differ.
|
||||
// Each row has its own tags.
|
||||
startTimestamp := timeToProtoTimestamp(vd.Start)
|
||||
endTimestamp := timeToProtoTimestamp(vd.End)
|
||||
|
||||
mType := measureTypeFromMeasure(vd.View.Measure)
|
||||
timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
|
||||
// It is imperative that the ordering of "LabelValues" matches those
|
||||
// of the Label keys in the metric descriptor.
|
||||
for _, row := range vd.Rows {
|
||||
labelValues := labelValuesFromTags(row.Tags)
|
||||
point := rowToPoint(vd.View, row, endTimestamp, mType)
|
||||
timeseries = append(timeseries, &metricspb.TimeSeries{
|
||||
StartTimestamp: startTimestamp,
|
||||
LabelValues: labelValues,
|
||||
Points: []*metricspb.Point{point},
|
||||
})
|
||||
}
|
||||
|
||||
if len(timeseries) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
return timeseries, nil
|
||||
}
|
||||
|
||||
func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
|
||||
unixNano := t.UnixNano()
|
||||
return &timestamp.Timestamp{
|
||||
Seconds: int64(unixNano / 1e9),
|
||||
Nanos: int32(unixNano % 1e9),
|
||||
}
|
||||
}
|
||||
|
||||
func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
|
||||
pt := &metricspb.Point{
|
||||
Timestamp: endTimestamp,
|
||||
}
|
||||
|
||||
switch data := row.Data.(type) {
|
||||
case *view.CountData:
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
|
||||
|
||||
case *view.DistributionData:
|
||||
pt.Value = &metricspb.Point_DistributionValue{
|
||||
DistributionValue: &metricspb.DistributionValue{
|
||||
Count: data.Count,
|
||||
Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
|
||||
// TODO: Add Exemplar
|
||||
Buckets: bucketsToProtoBuckets(data.CountPerBucket),
|
||||
BucketOptions: &metricspb.DistributionValue_BucketOptions{
|
||||
Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
|
||||
Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
|
||||
Bounds: v.Aggregation.Buckets,
|
||||
},
|
||||
},
|
||||
},
|
||||
SumOfSquaredDeviation: data.SumOfSquaredDev,
|
||||
}}
|
||||
|
||||
case *view.LastValueData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
|
||||
case *view.SumData:
|
||||
setPointValue(pt, data.Value, mType)
|
||||
}
|
||||
|
||||
return pt
|
||||
}
|
||||
|
||||
// Not returning anything from this function because metricspb.Point.is_Value is an unexported
|
||||
// interface hence we just have to set its value by pointer.
|
||||
func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
|
||||
if mType == measureInt64 {
|
||||
pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
|
||||
} else {
|
||||
pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
|
||||
}
|
||||
}
|
||||
|
||||
func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
|
||||
distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
|
||||
for i := 0; i < len(countPerBucket); i++ {
|
||||
count := countPerBucket[i]
|
||||
|
||||
distBuckets[i] = &metricspb.DistributionValue_Bucket{
|
||||
Count: count,
|
||||
}
|
||||
}
|
||||
|
||||
return distBuckets
|
||||
}
|
||||
|
||||
func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
|
||||
if len(tags) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
labelValues := make([]*metricspb.LabelValue, 0, len(tags))
|
||||
for _, tag_ := range tags {
|
||||
labelValues = append(labelValues, &metricspb.LabelValue{
|
||||
Value: tag_.Value,
|
||||
|
||||
// It is imperative that we set the "HasValue" attribute,
|
||||
// in order to distinguish missing a label from the empty string.
|
||||
// https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
|
||||
//
|
||||
// OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
|
||||
// so the best case that we can use to distinguish missing labels/tags from the
|
||||
// empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
|
||||
// a value.
|
||||
HasValue: tag_.Key.Name() != "",
|
||||
})
|
||||
}
|
||||
return labelValues
|
||||
}
|
||||
17
vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
generated
vendored
@@ -1,17 +0,0 @@
// Copyright 2018, OpenCensus Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package ocagent

const Version = "0.0.1"
21
vendor/github.com/ArthurHlt/go-eureka-client/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2017 Arthur Halet

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
357
vendor/github.com/ArthurHlt/go-eureka-client/eureka/client.go
generated
vendored
@@ -1,357 +0,0 @@
|
||||
package eureka
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBufferSize = 10
|
||||
UP = "UP"
|
||||
DOWN = "DOWN"
|
||||
STARTING = "STARTING"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
CertFile string `json:"certFile"`
|
||||
KeyFile string `json:"keyFile"`
|
||||
CaCertFile []string `json:"caCertFiles"`
|
||||
DialTimeout time.Duration `json:"timeout"`
|
||||
Consistency string `json:"consistency"`
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
Config Config `json:"config"`
|
||||
Cluster *Cluster `json:"cluster"`
|
||||
httpClient *http.Client
|
||||
persistence io.Writer
|
||||
cURLch chan string
|
||||
// CheckRetry can be used to control the policy for failed requests
|
||||
// and modify the cluster if needed.
|
||||
// The client calls it before sending requests again, and
|
||||
// stops retrying if CheckRetry returns some error. The cases that
|
||||
// this function needs to handle include no response and unexpected
|
||||
// http status code of response.
|
||||
// If CheckRetry is nil, client will call the default one
|
||||
// `DefaultCheckRetry`.
|
||||
// Argument cluster is the eureka.Cluster object that these requests have been made on.
|
||||
// Argument numReqs is the number of http.Requests that have been made so far.
|
||||
// Argument lastResp is the http.Responses from the last request.
|
||||
// Argument err is the reason of the failure.
|
||||
CheckRetry func(cluster *Cluster, numReqs int,
|
||||
lastResp http.Response, err error) error
|
||||
}
|
||||
|
||||
// NewClient create a basic client that is configured to be used
|
||||
// with the given machine list.
|
||||
func NewClient(machines []string) *Client {
|
||||
config := Config{
|
||||
// default timeout is one second
|
||||
DialTimeout: time.Second,
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
Cluster: NewCluster(machines),
|
||||
Config: config,
|
||||
}
|
||||
|
||||
client.initHTTPClient()
|
||||
return client
|
||||
}
|
||||
|
||||
// NewTLSClient create a basic client with TLS configuration
|
||||
func NewTLSClient(machines []string, cert string, key string, caCerts []string) (*Client, error) {
|
||||
// overwrite the default machine to use https
|
||||
if len(machines) == 0 {
|
||||
machines = []string{"https://127.0.0.1:4001"}
|
||||
}
|
||||
|
||||
config := Config{
|
||||
// default timeout is one second
|
||||
DialTimeout: time.Second,
|
||||
CertFile: cert,
|
||||
KeyFile: key,
|
||||
CaCertFile: make([]string, 0),
|
||||
}
|
||||
|
||||
client := &Client{
|
||||
Cluster: NewCluster(machines),
|
||||
Config: config,
|
||||
}
|
||||
|
||||
err := client.initHTTPSClient(cert, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, caCert := range caCerts {
|
||||
if err := client.AddRootCA(caCert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewClientFromFile creates a client from a given file path.
|
||||
// The given file is expected to use the JSON format.
|
||||
func NewClientFromFile(fpath string) (*Client, error) {
|
||||
fi, err := os.Open(fpath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := fi.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
return NewClientFromReader(fi)
|
||||
}
|
||||
|
||||
// NewClientFromReader creates a Client configured from a given reader.
|
||||
// The configuration is expected to use the JSON format.
|
||||
func NewClientFromReader(reader io.Reader) (*Client, error) {
|
||||
c := new(Client)
|
||||
|
||||
b, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(b, c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if c.Config.CertFile == "" {
|
||||
c.initHTTPClient()
|
||||
} else {
|
||||
err = c.initHTTPSClient(c.Config.CertFile, c.Config.KeyFile)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, caCert := range c.Config.CaCertFile {
|
||||
if err := c.AddRootCA(caCert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Override the Client's HTTP Transport object
|
||||
func (c *Client) SetTransport(tr *http.Transport) {
|
||||
c.httpClient.Transport = tr
|
||||
}
|
||||
|
||||
// initHTTPClient initializes a HTTP client for eureka client
|
||||
func (c *Client) initHTTPClient() {
|
||||
tr := &http.Transport{
|
||||
Dial: c.dial,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
}
|
||||
c.httpClient = &http.Client{Transport: tr}
|
||||
}
|
||||
|
||||
// initHTTPClient initializes a HTTPS client for eureka client
|
||||
func (c *Client) initHTTPSClient(cert, key string) error {
|
||||
if cert == "" || key == "" {
|
||||
return errors.New("Require both cert and key path")
|
||||
}
|
||||
|
||||
tlsCert, err := tls.LoadX509KeyPair(cert, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
Certificates: []tls.Certificate{tlsCert},
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
||||
tr := &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
Dial: c.dial,
|
||||
}
|
||||
|
||||
c.httpClient = &http.Client{Transport: tr}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sets the DialTimeout value
|
||||
func (c *Client) SetDialTimeout(d time.Duration) {
|
||||
c.Config.DialTimeout = d
|
||||
}
|
||||
|
||||
// AddRootCA adds a root CA cert for the eureka client
|
||||
func (c *Client) AddRootCA(caCert string) error {
|
||||
if c.httpClient == nil {
|
||||
return errors.New("Client has not been initialized yet!")
|
||||
}
|
||||
|
||||
certBytes, err := ioutil.ReadFile(caCert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr, ok := c.httpClient.Transport.(*http.Transport)
|
||||
|
||||
if !ok {
|
||||
panic("AddRootCA(): Transport type assert should not fail")
|
||||
}
|
||||
|
||||
if tr.TLSClientConfig.RootCAs == nil {
|
||||
caCertPool := x509.NewCertPool()
|
||||
ok = caCertPool.AppendCertsFromPEM(certBytes)
|
||||
if ok {
|
||||
tr.TLSClientConfig.RootCAs = caCertPool
|
||||
}
|
||||
tr.TLSClientConfig.InsecureSkipVerify = false
|
||||
} else {
|
||||
ok = tr.TLSClientConfig.RootCAs.AppendCertsFromPEM(certBytes)
|
||||
}
|
||||
|
||||
if !ok {
|
||||
err = errors.New("Unable to load caCert")
|
||||
}
|
||||
|
||||
c.Config.CaCertFile = append(c.Config.CaCertFile, caCert)
|
||||
return err
|
||||
}
|
||||
|
||||
// SetCluster updates cluster information using the given machine list.
|
||||
func (c *Client) SetCluster(machines []string) bool {
|
||||
success := c.internalSyncCluster(machines)
|
||||
return success
|
||||
}
|
||||
|
||||
func (c *Client) GetCluster() []string {
|
||||
return c.Cluster.Machines
|
||||
}
|
||||
|
||||
// SyncCluster updates the cluster information using the internal machine list.
|
||||
func (c *Client) SyncCluster() bool {
|
||||
return c.internalSyncCluster(c.Cluster.Machines)
|
||||
}
|
||||
|
||||
// internalSyncCluster syncs cluster information using the given machine list.
|
||||
func (c *Client) internalSyncCluster(machines []string) bool {
|
||||
for _, machine := range machines {
|
||||
httpPath := c.createHttpPath(machine, "machines")
|
||||
resp, err := c.httpClient.Get(httpPath)
|
||||
if err != nil {
|
||||
// try another machine in the cluster
|
||||
continue
|
||||
} else {
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
// try another machine in the cluster
|
||||
continue
|
||||
}
|
||||
|
||||
// update Machines List
|
||||
c.Cluster.updateFromStr(string(b))
|
||||
|
||||
// update leader
|
||||
// the first one in the machine list is the leader
|
||||
c.Cluster.switchLeader(0)
|
||||
|
||||
logger.Debug("sync.machines " + strings.Join(c.Cluster.Machines, ", "))
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// createHttpPath creates a complete HTTP URL.
|
||||
// serverName should contain both the host name and a port number, if any.
|
||||
func (c *Client) createHttpPath(serverName string, _path string) string {
|
||||
u, err := url.Parse(serverName)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
u.Path = path.Join(u.Path, _path)
|
||||
|
||||
if u.Scheme == "" {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
return u.String()
|
||||
}
|
||||
|
||||
// dial attempts to open a TCP connection to the provided address, explicitly
|
||||
// enabling keep-alives with a one-second interval.
|
||||
func (c *Client) dial(network, addr string) (net.Conn, error) {
|
||||
conn, err := net.DialTimeout(network, addr, c.Config.DialTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tcpConn, ok := conn.(*net.TCPConn)
|
||||
if !ok {
|
||||
return nil, errors.New("Failed type-assertion of net.Conn as *net.TCPConn")
|
||||
}
|
||||
|
||||
// Keep TCP alive to check whether or not the remote machine is down
|
||||
if err = tcpConn.SetKeepAlive(true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = tcpConn.SetKeepAlivePeriod(time.Second); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tcpConn, nil
|
||||
}
|
||||
|
||||
// MarshalJSON implements the Marshaller interface
|
||||
// as defined by the standard JSON package.
|
||||
func (c *Client) MarshalJSON() ([]byte, error) {
|
||||
b, err := json.Marshal(struct {
|
||||
Config Config `json:"config"`
|
||||
Cluster *Cluster `json:"cluster"`
|
||||
}{
|
||||
Config: c.Config,
|
||||
Cluster: c.Cluster,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements the Unmarshaller interface
|
||||
// as defined by the standard JSON package.
|
||||
func (c *Client) UnmarshalJSON(b []byte) error {
|
||||
temp := struct {
|
||||
Config Config `json:"config"`
|
||||
Cluster *Cluster `json:"cluster"`
|
||||
}{}
|
||||
err := json.Unmarshal(b, &temp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Cluster = temp.Cluster
|
||||
c.Config = temp.Config
|
||||
return nil
|
||||
}
|
||||
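A brief, hedged usage sketch for the client defined above. It only relies on identifiers visible in this file (NewClient, SetDialTimeout, SyncCluster, GetCluster); the machine URLs are placeholders, not values taken from the diff.

package main

import (
    "fmt"
    "time"

    "github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
    // NewClient builds a plain-HTTP client with a one-second dial timeout by default.
    client := eureka.NewClient([]string{
        "http://eureka-1:8761/eureka", // placeholder machine URLs
        "http://eureka-2:8761/eureka",
    })
    client.SetDialTimeout(3 * time.Second)

    // SyncCluster refreshes the machine list from whichever machine answers first.
    if ok := client.SyncCluster(); !ok {
        fmt.Println("could not reach any eureka machine")
        return
    }
    fmt.Println("machines:", client.GetCluster())
}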
51
vendor/github.com/ArthurHlt/go-eureka-client/eureka/cluster.go
generated
vendored
@@ -1,51 +0,0 @@
package eureka

import (
    "net/url"
    "strings"
)

type Cluster struct {
    Leader   string   `json:"leader"`
    Machines []string `json:"machines"`
}

func NewCluster(machines []string) *Cluster {
    // if an empty slice was sent in then just assume HTTP 4001 on localhost
    if len(machines) == 0 {
        machines = []string{"http://127.0.0.1:4001"}
    }

    // default leader and machines
    return &Cluster{
        Leader:   machines[0],
        Machines: machines,
    }
}

// switchLeader switch the current leader to machines[num]
func (cl *Cluster) switchLeader(num int) {
    logger.Debug("switch.leader[from %v to %v]",
        cl.Leader, cl.Machines[num])

    cl.Leader = cl.Machines[num]
}

func (cl *Cluster) updateFromStr(machines string) {
    cl.Machines = strings.Split(machines, ", ")
}

func (cl *Cluster) updateLeader(leader string) {
    logger.Debug("update.leader[%s,%s]", cl.Leader, leader)
    cl.Leader = leader
}

func (cl *Cluster) updateLeaderFromURL(u *url.URL) {
    var leader string
    if u.Scheme == "" {
        leader = "http://" + u.Host
    } else {
        leader = u.Scheme + "://" + u.Host
    }
    cl.updateLeader(leader)
}
21
vendor/github.com/ArthurHlt/go-eureka-client/eureka/debug.go
generated
vendored
@@ -1,21 +0,0 @@
package eureka

import (
    "github.com/ArthurHlt/gominlog"
    "log"
)

var logger *gominlog.MinLog

func GetLogger() *log.Logger {
    return logger.GetLogger()
}

func SetLogger(loggerLog *log.Logger) {
    logger.SetLogger(loggerLog)
}

func init() {
    // Default logger uses the go default log.
    logger = gominlog.NewClassicMinLogWithPackageName("go-eureka-client")
}
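Since debug.go exposes SetLogger, here is a hedged one-liner showing how the client's logging could be routed to a standard library logger; the output destination and prefix are arbitrary choices, not part of the vendored code.

package main

import (
    "log"
    "os"

    "github.com/ArthurHlt/go-eureka-client/eureka"
)

func main() {
    // Replace the default gominlog-backed logger's *log.Logger with our own.
    eureka.SetLogger(log.New(os.Stdout, "go-eureka-client ", log.LstdFlags))
}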
10
vendor/github.com/ArthurHlt/go-eureka-client/eureka/delete.go
generated
vendored
@@ -1,10 +0,0 @@
package eureka

import "strings"

func (c *Client) UnregisterInstance(appId, instanceId string) error {
    values := []string{"apps", appId, instanceId}
    path := strings.Join(values, "/")
    _, err := c.Delete(path)
    return err
}
Some files were not shown because too many files have changed in this diff.