forked from SW/traefik
Compare commits
50 Commits
| SHA1 |
|---|
| 266f5d18a8 |
| 305af43fb9 |
| 30545808d9 |
| 358f125a58 |
| 57ae9a80d5 |
| e32c021f16 |
| 0db2a9aadd |
| 4f4dab3ca5 |
| eaee39e534 |
| d85eb0495c |
| 8aa618775d |
| f6b7e333be |
| 108d9dbb3f |
| 2a1fa32950 |
| ee7aa77833 |
| fcc4cab614 |
| 1206cd52fc |
| 5cdba752a4 |
| b48ea1e173 |
| 373040f552 |
| 443902a0f0 |
| 9eb02d9b03 |
| 2eb651645d |
| 630571fdc8 |
| 00fc43ebce |
| 6d906fa4c8 |
| 6a4c7796e3 |
| 67704e333d |
| 76c9cea856 |
| 0366fb9bc2 |
| 5fed947eaa |
| c289279d24 |
| 418dca1113 |
| ed293e3058 |
| 40bb0cd879 |
| 32f5e0df8f |
| 40b8a93930 |
| b52b0f58b9 |
| 0016db0856 |
| 5aeca4507e |
| 11bfb49e65 |
| d0cdb5608f |
| 8bb8ad5e02 |
| b488d8365c |
| e2bd7b45d1 |
| c8ea2ce703 |
| 7d1edcd735 |
| 9660c31d3d |
| 37ac19583a |
| ca60c52199 |
.gitattributes (vendored, 2 lines changed)

@@ -1 +1 @@
# vendor/github.com/xenolf/lego/providers/dns/cloudxns/cloudxns.go eol=crlf
# vendor/github.com/go-acme/lego/providers/dns/cloudxns/cloudxns.go eol=crlf

.gitignore (vendored, 18 lines changed)

@@ -1,15 +1,17 @@
/dist
/autogen/genstatic/gen.go
.idea/
.intellij/
*.iml
/traefik
/traefik.toml
/static/
/webui/.tmp/
.vscode/
.DS_Store
/static/
/autogen/genstatic/gen.go
/webui/.tmp/
/examples/acme/acme.json
/site/
/docs/site/
/traefik.toml
/dist
/traefik
*.log
*.exe
.DS_Store
/examples/acme/acme.json
cover.out

.travis.yml (10 lines changed)

@@ -30,9 +30,8 @@ before_deploy:
make -j${N_MAKE_JOBS} crossbinary-parallel;
tar cfz dist/traefik-${VERSION}.src.tar.gz --exclude-vcs --exclude dist .;
fi;
curl -sI https://github.com/containous/structor/releases/latest | grep -Fi Location | tr -d '\r' | sed "s/tag/download/g" | awk -F " " '{ print $2 "/structor_linux-amd64"}' | wget --output-document=$GOPATH/bin/structor -i -;
chmod +x $GOPATH/bin/structor;
structor -o containous -r traefik --dockerfile-url="https://raw.githubusercontent.com/containous/traefik/master/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/containous/structor/master/traefik-menu.js.gotmpl" --rqts-url="https://raw.githubusercontent.com/containous/structor/master/requirements-override.txt" --exp-branch=master --debug;
curl -sfL https://raw.githubusercontent.com/containous/structor/master/godownloader.sh | bash -s -- -b "${GOPATH}/bin" v1.7.0
structor -o containous -r traefik --dockerfile-url="https://raw.githubusercontent.com/containous/traefik/v1.7/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/containous/structor/master/traefik-menu.js.gotmpl" --rqts-url="https://raw.githubusercontent.com/containous/structor/master/requirements-override.txt" --exp-branch=master --force-edit-url --debug;
fi
deploy:
- provider: releases
@@ -49,11 +48,6 @@ deploy:
on:
repo: containous/traefik
tags: true
- provider: script
script: sh script/deploy-docker.sh
skip_cleanup: true
on:
repo: containous/traefik
- provider: pages
edge: false
github_token: ${GITHUB_TOKEN}

CHANGELOG.md (65 lines changed)

@@ -1,5 +1,66 @@
# Change Log

## [v1.7.10](https://github.com/containous/traefik/tree/v1.7.10) (2019-03-28)
[All Commits](https://github.com/containous/traefik/compare/v1.7.9...v1.7.10)

**Bug fixes:**
- **[acme]** fix: update lego. ([#4670](https://github.com/containous/traefik/pull/4670) by [ldez](https://github.com/ldez))
- **[acme]** Migrate to go-acme/lego. ([#4577](https://github.com/containous/traefik/pull/4577) by [ldez](https://github.com/ldez))
- **[authentication,middleware]** Reorder Auth and TLSClientHeaders middleware ([#4557](https://github.com/containous/traefik/pull/4557) by [tomberek](https://github.com/tomberek))
- **[k8s/ingress]** Support external name service on global default backend ([#4564](https://github.com/containous/traefik/pull/4564) by [kippandrew](https://github.com/kippandrew))
- **[k8s/ingress]** Loop through service ports for global backend ([#4486](https://github.com/containous/traefik/pull/4486) by [dtomcej](https://github.com/dtomcej))
- **[k8s]** Add entrypoints prefix in kubernetes frontend/backend id ([#4679](https://github.com/containous/traefik/pull/4679) by [juliens](https://github.com/juliens))
- **[websocket]** Exclude websocket connections from Average Response Time ([#4313](https://github.com/containous/traefik/pull/4313) by [siyu6974](https://github.com/siyu6974))
- **[middleware]** Added support for configuring trace headers for DataDog tracing ([#4516](https://github.com/containous/traefik/pull/4516) by [aantono](https://github.com/aantono))

**Documentation:**
- **[acme]** Add _FILE Environment Variable Documentation ([#4643](https://github.com/containous/traefik/pull/4643) by [dargmuesli](https://github.com/dargmuesli))
- **[docker]** Add TraefikEE as security workaround ([#4606](https://github.com/containous/traefik/pull/4606) by [emilevauge](https://github.com/emilevauge))

## [v1.7.9](https://github.com/containous/traefik/tree/v1.7.9) (2019-02-11)
[All Commits](https://github.com/containous/traefik/compare/v1.7.8...v1.7.9)

**Bug fixes:**
- **[acme]** Updates of Lego. ([#4480](https://github.com/containous/traefik/pull/4480) by [ldez](https://github.com/ldez))
- **[k8s]** app-root on non-explicit path include "/" in the redirect ([#4458](https://github.com/containous/traefik/pull/4458) by [doctori](https://github.com/doctori))
- **[middleware]** Missing trailers with retry ([#4442](https://github.com/containous/traefik/pull/4442) by [juliens](https://github.com/juliens))
- **[rancher]** Handle errors when working with rancher ([#4378](https://github.com/containous/traefik/pull/4378) by [apsifly](https://github.com/apsifly))
- **[servicefabric]** Add support for specifying the name of the endpoint. ([#4479](https://github.com/containous/traefik/pull/4479) by [ldez](https://github.com/ldez))
- **[tls]** insecureSkipVerify for the passTLSCert transport ([#4438](https://github.com/containous/traefik/pull/4438) by [jbdoumenjou](https://github.com/jbdoumenjou))
- **[tracing]** Add Tracing Header Context Name option for Jaeger ([#4459](https://github.com/containous/traefik/pull/4459) by [gadoor](https://github.com/gadoor))

**Documentation:**
- **[metrics]** Update default value of buckets for Prometheus ([#4468](https://github.com/containous/traefik/pull/4468) by [adam-golab](https://github.com/adam-golab))
- **[rules]** Fixes the display of the associativity rules. ([#4478](https://github.com/containous/traefik/pull/4478) by [ldez](https://github.com/ldez))
- Fixed curl example ([#4471](https://github.com/containous/traefik/pull/4471) by [rgarrigue](https://github.com/rgarrigue))

## [v1.7.8](https://github.com/containous/traefik/tree/v1.7.8) (2019-01-29)
[All Commits](https://github.com/containous/traefik/compare/v1.7.7...v1.7.8)

**Bug fixes:**
- **[acme]** Updates lego. ([#4428](https://github.com/containous/traefik/pull/4428) by [ldez](https://github.com/ldez))
- **[acme]** Updates lego. ([#4376](https://github.com/containous/traefik/pull/4376) by [ldez](https://github.com/ldez))
- **[docker]** Fixes docker swarm mode refresh second for KV. ([#4420](https://github.com/containous/traefik/pull/4420) by [ldez](https://github.com/ldez))
- **[ecs]** Generic awsvpc support, not just Fargate ([#4360](https://github.com/containous/traefik/pull/4360) by [maartenvanderhoef](https://github.com/maartenvanderhoef))
- **[ecs]** Cache existing task definitions to avoid rate limiting ([#4177](https://github.com/containous/traefik/pull/4177) by [hwhelan-CB](https://github.com/hwhelan-CB))
- **[tls]** Check for dynamic tls updates on configuration preload ([#4022](https://github.com/containous/traefik/pull/4022) by [ffilippopoulos](https://github.com/ffilippopoulos))
- **[tracing]** Support Datadog tracer priority sampling ([#4359](https://github.com/containous/traefik/pull/4359) by [jcassee](https://github.com/jcassee))
- Update to Go 1.11.5 [CVE-2019-6486](https://nvd.nist.gov/vuln/detail/CVE-2019-6486)

**Documentation:**
- **[acme]** More detailed info about Google Cloud DNS. ([#4395](https://github.com/containous/traefik/pull/4395) by [ldez](https://github.com/ldez))
- **[acme]** Tested wildcard ACME challenge with DNSimple ([#4384](https://github.com/containous/traefik/pull/4384) by [tstackhouse](https://github.com/tstackhouse))
- **[docker]** Note about quotes for entrypoint definition with docker-compose ([#4390](https://github.com/containous/traefik/pull/4390) by [Dragnucs](https://github.com/Dragnucs))
- **[k8s]** Allow Træfik to update Ingress status ([#4397](https://github.com/containous/traefik/pull/4397) by [rbq](https://github.com/rbq))
- **[k8s]** Minor formatting fixes ([#4394](https://github.com/containous/traefik/pull/4394) by [dbirks](https://github.com/dbirks))
- **[metrics]** Missing information about statistics parameter ([#4393](https://github.com/containous/traefik/pull/4393) by [decima](https://github.com/decima))
- **[rules]** Route priorities: document minimum priority value ([#4374](https://github.com/containous/traefik/pull/4374) by [tw-360vier](https://github.com/tw-360vier))
- Removed repeated entryPoints.http from grpc.md ([#4370](https://github.com/containous/traefik/pull/4370) by [ishaanbahal](https://github.com/ishaanbahal))
- Happy 2019 ([#4367](https://github.com/containous/traefik/pull/4367) by [emilevauge](https://github.com/emilevauge))

**Misc:**
- Assert that test timeout service is ready. ([#4398](https://github.com/containous/traefik/pull/4398) by [timoreimann](https://github.com/timoreimann))

## [v1.7.7](https://github.com/containous/traefik/tree/v1.7.7) (2019-01-08)
[All Commits](https://github.com/containous/traefik/compare/v1.7.6...v1.7.7)

@@ -1080,7 +1141,7 @@
- **[acme,tls]** Rename TLSConfigurations to TLS. ([#2744](https://github.com/containous/traefik/pull/2744) by [ldez](https://github.com/ldez))
- **[acme,provider,docker,tls]** Make the TLS certificates management dynamic. ([#2233](https://github.com/containous/traefik/pull/2233) by [nmengin](https://github.com/nmengin))
- **[acme]** Add Let's Encrypt HTTP Challenge ([#2701](https://github.com/containous/traefik/pull/2701) by [Juliens](https://github.com/Juliens))
- **[acme]** Update github.com/xenolf/lego to 0.4.1 ([#2304](https://github.com/containous/traefik/pull/2304) by [oldmantaiter](https://github.com/oldmantaiter))
- **[acme]** Update github.com/go-acme/lego to 0.4.1 ([#2304](https://github.com/containous/traefik/pull/2304) by [oldmantaiter](https://github.com/oldmantaiter))
- **[api,healthcheck,metrics,provider,webui]** Split Web into API/Dashboard, ping, metric and Rest Provider ([#2335](https://github.com/containous/traefik/pull/2335) by [Juliens](https://github.com/Juliens))
- **[authentication]** Pass through certain forward auth negative response headers ([#2127](https://github.com/containous/traefik/pull/2127) by [wheresmysocks](https://github.com/wheresmysocks))
- **[cluster,consul,file]** Add file to storeconfig ([#2419](https://github.com/containous/traefik/pull/2419) by [emilevauge](https://github.com/emilevauge))

@@ -1359,7 +1420,7 @@

**Enhancements:**
- **[acme,provider,docker,tls]** Make the TLS certificates management dynamic. ([#2233](https://github.com/containous/traefik/pull/2233) by [nmengin](https://github.com/nmengin))
- **[acme]** Update github.com/xenolf/lego to 0.4.1 ([#2304](https://github.com/containous/traefik/pull/2304) by [oldmantaiter](https://github.com/oldmantaiter))
- **[acme]** Update github.com/go-acme/lego to 0.4.1 ([#2304](https://github.com/containous/traefik/pull/2304) by [oldmantaiter](https://github.com/oldmantaiter))
- **[api,healthcheck,metrics,provider,webui]** Split Web into API/Dashboard, ping, metric and Rest Provider ([#2335](https://github.com/containous/traefik/pull/2335) by [Juliens](https://github.com/Juliens))
- **[authentication]** Pass through certain forward auth negative response headers ([#2127](https://github.com/containous/traefik/pull/2127) by [wheresmysocks](https://github.com/wheresmysocks))
- **[cluster,consul,file]** Add file to storeconfig ([#2419](https://github.com/containous/traefik/pull/2419) by [emilevauge](https://github.com/emilevauge))

@@ -87,7 +87,7 @@ If you happen to update the provider templates (in `/templates`), you need to ru

[dep](https://github.com/golang/dep) is not required for building; however, it is required in order to modify dependencies (i.e., add, update, or remove third-party packages).

You need to use [dep](https://github.com/golang/dep) >= 0.4.1 and < 0.5.0.
You need to use [dep](https://github.com/golang/dep) >= 0.5.0.

If you want to add a dependency, use `dep ensure -add` to have [dep](https://github.com/golang/dep) put it into the vendor folder and update the dep manifest/lock files (`Gopkg.toml` and `Gopkg.lock`, respectively).

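As a minimal sketch (the package path is only an illustration), adding a dependency with dep looks like this:

```shell
# Add a hypothetical dependency: dep pins it in Gopkg.toml/Gopkg.lock
# and vendors the source under vendor/.
dep ensure -add github.com/example/somepkg

# Verify that the manifest, lock file, and vendor tree are in sync.
dep status
```
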
Gopkg.lock (generated, 2290 lines changed)

File diff suppressed because it is too large.

Gopkg.toml (32 lines changed)

@@ -45,6 +45,11 @@
name = "github.com/abbot/go-http-auth"
source = "github.com/containous/go-http-auth"

[[constraint]]
name = "github.com/thoas/stats"
# related to https://github.com/thoas/stats/pull/32
revision = "4975baf6a358ed3ddaa42133996e1959f96c9300"

[[constraint]]
branch = "master"
name = "github.com/armon/go-proxyproto"
@@ -54,8 +59,8 @@
version = "1.13.11"

[[constraint]]
branch = "master"
name = "github.com/cenk/backoff"
version = "v2.1.1"

[[constraint]]
name = "github.com/containous/flaeg"
@@ -71,7 +76,7 @@

[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"
version = "1.3.0"
version = "v1.5.0"

[[constraint]]
name = "github.com/coreos/go-systemd"
@@ -117,8 +122,8 @@
version = "1.3.7"

[[constraint]]
branch = "master"
name = "github.com/jjcollinge/servicefabric"
revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe"

[[constraint]]
branch = "master"
@@ -128,18 +133,6 @@
name = "github.com/mesosphere/mesos-dns"
source = "https://github.com/containous/mesos-dns.git"

[[constraint]]
branch = "master"
name = "github.com/mitchellh/copystructure"

[[constraint]]
branch = "master"
name = "github.com/mitchellh/hashstructure"

[[constraint]]
branch = "master"
name = "github.com/mitchellh/mapstructure"

[[constraint]]
name = "github.com/opentracing/opentracing-go"
version = "1.0.2"
@@ -167,7 +160,7 @@

[[constraint]]
name = "github.com/uber/jaeger-client-go"
version = "2.9.0"
version = "2.15.0"

[[constraint]]
name = "github.com/uber/jaeger-lib"
@@ -186,9 +179,8 @@
name = "github.com/vulcand/oxy"

[[constraint]]
branch = "master"
name = "github.com/xenolf/lego"
# version = "1.0.0"
name = "github.com/go-acme/lego"
version = "2.4.0"

[[constraint]]
name = "google.golang.org/grpc"
@@ -263,4 +255,4 @@

[[constraint]]
name = "gopkg.in/DataDog/dd-trace-go.v1"
version = "1.5.0"
version = "1.7.0"

@@ -113,13 +113,13 @@ If you need commercial support, please contact [Containo.us](https://containo.us

## Download

- Grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):
- Grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/v1.7/traefik.sample.toml):

```shell
./traefik --configFile=traefik.toml
```

- Or use the official tiny Docker image and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):
- Or use the official tiny Docker image and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/v1.7/traefik.sample.toml):

```shell
docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/etc/traefik/traefik.toml traefik

@@ -17,8 +17,8 @@ import (
"github.com/containous/traefik/log"
acmeprovider "github.com/containous/traefik/provider/acme"
"github.com/containous/traefik/types"
"github.com/xenolf/lego/certcrypto"
"github.com/xenolf/lego/registration"
"github.com/go-acme/lego/certcrypto"
"github.com/go-acme/lego/registration"
)

// Account is used to store lets encrypt registration info

acme/acme.go (20 lines changed)

@@ -27,19 +27,19 @@ import (
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/eapache/channels"
"github.com/go-acme/lego/certificate"
"github.com/go-acme/lego/challenge"
"github.com/go-acme/lego/challenge/dns01"
"github.com/go-acme/lego/challenge/http01"
"github.com/go-acme/lego/lego"
legolog "github.com/go-acme/lego/log"
"github.com/go-acme/lego/providers/dns"
"github.com/go-acme/lego/registration"
"github.com/sirupsen/logrus"
"github.com/xenolf/lego/certificate"
"github.com/xenolf/lego/challenge"
"github.com/xenolf/lego/challenge/dns01"
"github.com/xenolf/lego/challenge/http01"
"github.com/xenolf/lego/lego"
legolog "github.com/xenolf/lego/log"
"github.com/xenolf/lego/providers/dns"
"github.com/xenolf/lego/registration"
)

var (
// OSCPMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270
// OSCPMustStaple enables OSCP stapling as from https://github.com/go-acme/lego/issues/270
OSCPMustStaple = false
)

@@ -428,7 +428,7 @@ func (a *ACME) buildACMEClient(account *Account) (*lego.Client, error) {

config := lego.NewConfig(account)
config.CADirURL = caServer
config.KeyType = account.KeyType
config.Certificate.KeyType = account.KeyType
config.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version)

client, err := lego.NewClient(config)

@@ -9,7 +9,7 @@ import (
"github.com/containous/traefik/cluster"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/xenolf/lego/challenge"
"github.com/go-acme/lego/challenge"
)

var _ challenge.ProviderTimeout = (*challengeHTTPProvider)(nil)

@@ -11,8 +11,8 @@ import (
"github.com/containous/traefik/cluster"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/xenolf/lego/challenge"
"github.com/xenolf/lego/challenge/tlsalpn01"
"github.com/go-acme/lego/challenge"
"github.com/go-acme/lego/challenge/tlsalpn01"
)

var _ challenge.ProviderTimeout = (*challengeTLSProvider)(nil)

@@ -31,7 +31,7 @@ import (
"github.com/containous/traefik/safe"
traefiktls "github.com/containous/traefik/tls"
"github.com/containous/traefik/types"
"github.com/elazarl/go-bindata-assetfs"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/thoas/stats"
)

@@ -5,7 +5,7 @@ import (

"github.com/containous/mux"
"github.com/containous/traefik/log"
"github.com/elazarl/go-bindata-assetfs"
assetfs "github.com/elazarl/go-bindata-assetfs"
)

// DashboardHandler expose dashboard routes

@@ -9,7 +9,7 @@ import (
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/elazarl/go-bindata-assetfs"
assetfs "github.com/elazarl/go-bindata-assetfs"
thoas_stats "github.com/thoas/stats"
"github.com/unrolled/render"
)

@@ -10,7 +10,7 @@ RUN go get golang.org/x/lint/golint \

# Which docker version to test on
ARG DOCKER_VERSION=17.03.2
ARG DEP_VERSION=0.4.1
ARG DEP_VERSION=0.5.1

# Download go-bindata binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \

@@ -13,7 +13,7 @@ import (
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/satori/go.uuid"
uuid "github.com/satori/go.uuid"
)

// Metadata stores Object plus metadata

@@ -4,7 +4,7 @@ import (
"time"

"github.com/containous/flaeg"
"github.com/containous/traefik-extra-service-fabric"
servicefabric "github.com/containous/traefik-extra-service-fabric"
"github.com/containous/traefik/api"
"github.com/containous/traefik/configuration"
"github.com/containous/traefik/middlewares/accesslog"
@@ -224,10 +224,11 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
ServiceName: "traefik",
SpanNameLimit: 0,
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
TraceContextHeaderName: "uber-trace-id",
},
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
@@ -239,6 +240,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
LocalAgentHostPort: "localhost:8126",
GlobalTag: "",
Debug: false,
PrioritySampling: false,
},
}

@@ -34,7 +34,7 @@ import (
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/coreos/go-systemd/daemon"
"github.com/elazarl/go-bindata-assetfs"
assetfs "github.com/elazarl/go-bindata-assetfs"
"github.com/ogier/pflag"
"github.com/sirupsen/logrus"
"github.com/vulcand/oxy/roundrobin"

@@ -6,7 +6,7 @@ import (
"time"

"github.com/containous/flaeg"
"github.com/containous/traefik-extra-service-fabric"
servicefabric "github.com/containous/traefik-extra-service-fabric"
"github.com/containous/traefik/acme"
"github.com/containous/traefik/api"
"github.com/containous/traefik/log"
@@ -33,8 +33,9 @@ import (
"github.com/containous/traefik/provider/zk"
"github.com/containous/traefik/tls"
"github.com/containous/traefik/types"
"github.com/go-acme/lego/challenge/dns01"
"github.com/pkg/errors"
"github.com/xenolf/lego/challenge/dns01"
jaegercli "github.com/uber/jaeger-client-go"
)

const (
@@ -235,6 +236,10 @@ func (gc *GlobalConfiguration) SetEffectiveConfiguration(configFile string) {
} else {
gc.Docker.TemplateVersion = 2
}

if gc.Docker.SwarmModeRefreshSeconds <= 0 {
gc.Docker.SwarmModeRefreshSeconds = 15
}
}

if gc.Marathon != nil {
@@ -331,10 +336,11 @@ func (gc *GlobalConfiguration) initTracing() {
case jaeger.Name:
if gc.Tracing.Jaeger == nil {
gc.Tracing.Jaeger = &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
TraceContextHeaderName: jaegercli.TraceContextHeaderName,
}
}
if gc.Tracing.Zipkin != nil {
@@ -368,6 +374,7 @@ func (gc *GlobalConfiguration) initTracing() {
LocalAgentHostPort: "localhost:8126",
GlobalTag: "",
Debug: false,
PrioritySampling: false,
}
}
if gc.Tracing.Zipkin != nil {

@@ -142,10 +142,11 @@ func TestSetEffectiveConfigurationTracing(t *testing.T) {
expected: &tracing.Tracing{
Backend: "jaeger",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
TraceContextHeaderName: "uber-trace-id",
},
Zipkin: nil,
},
@@ -155,10 +156,11 @@ func TestSetEffectiveConfigurationTracing(t *testing.T) {
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
TraceContextHeaderName: "uber-trace-id",
},
},
expected: &tracing.Tracing{
@@ -177,10 +179,11 @@ func TestSetEffectiveConfigurationTracing(t *testing.T) {
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6831",
TraceContextHeaderName: "uber-trace-id",
},
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://powpow:9411/api/v1/spans",

@@ -95,6 +95,7 @@ Following is the list of existing modifier rules:

Matcher rules determine if a particular request should be forwarded to a backend.

The associativity rule is the following:

- `,` is the `OR` operator (works **only inside a matcher**, ex: `Host:foo.com,bar.com`).
  - i.e., forward a request if any rule matches.
  - Does not work for `Headers` and `HeadersRegexp`.
@@ -236,7 +237,8 @@ The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portio
#### Priorities

By default, routes will be sorted (in descending order) using rules length (to avoid path overlap):
`PathPrefix:/foo;Host:foo.com` (length == 28) will be matched before `PathPrefixStrip:/foobar` (length == 23) will be matched before `PathPrefix:/foo,/bar` (length == 20).
- `PathPrefix:/foo;Host:foo.com` (length == 28) will be matched before `PathPrefixStrip:/foobar` (length == 23) will be matched before `PathPrefix:/foo,/bar` (length == 20).
- A priority value of 0 will be ignored, so the default value will be calculated (rules length).

You can customize priority by frontend. The priority value overrides the rule length during sorting:

@@ -744,7 +746,7 @@ Once a day (the first call begins 10 minutes after the start of Traefik), we col

### Show me the code !

If you want to dig into more details, here is the source code of the collecting system: [collector.go](https://github.com/containous/traefik/blob/master/collector/collector.go)
If you want to dig into more details, here is the source code of the collecting system: [collector.go](https://github.com/containous/traefik/blob/v1.7/collector/collector.go)

By default we anonymize all configuration fields, except fields tagged with `export=true`.

@@ -271,62 +271,72 @@ Useful if internal networks block external DNS queries.

##### `provider`

Here is a list of supported `provider`s, that can automate the DNS verification, along with the required environment variables and their [wildcard & root domain support](/configuration/acme/#wildcard-domains) for each. Do not hesitate to complete it.
Here is a list of supported `provider`s, that can automate the DNS verification, along with the required environment variables and their [wildcard & root domain support](/configuration/acme/#wildcard-domains) for each.
Do not hesitate to complete it.
Every lego environment variable can be overridden by its respective `_FILE` counterpart, which should have a filepath to a file that contains the secret as its value.
For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used to provide a Cloudflare API email address as a Docker secret named `traefik_cf-api-email`.
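
As an illustration of that `_FILE` pattern (the email value and service layout are placeholders, not part of the documented example), wiring it up with Docker secrets could look roughly like this:

```shell
# Store the Cloudflare account email as a Docker secret (swarm mode).
echo "acme@example.com" | docker secret create traefik_cf-api-email -

# Mount the secret and point lego at the file instead of passing the value
# directly; the same idea applies to CF_API_KEY via CF_API_KEY_FILE.
docker service create \
  --name traefik \
  --secret traefik_cf-api-email \
  -e CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email \
  traefik:1.7
```
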
| Provider Name | Provider Code | Environment Variables | Wildcard & Root Domain Support |
|---|---|---|---|
| [ACME DNS](https://github.com/joohoi/acme-dns) | `acme-dns` | `ACME_DNS_API_BASE`, `ACME_DNS_STORAGE_PATH` | Not tested yet |
| [Alibaba Cloud](https://www.vultr.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet |
| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` | Not tested yet |
| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP`, `[AZURE_METADATA_ENDPOINT]` | Not tested yet |
| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | `BLUECAT_SERVER_URL`, `BLUECAT_USER_NAME`, `BLUECAT_PASSWORD`, `BLUECAT_CONFIG_NAME`, `BLUECAT_DNS_VIEW` | Not tested yet |
| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | `CF_API_EMAIL`, `CF_API_KEY` - The `Global API Key` needs to be used, not the `Origin CA Key` | YES |
| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | `CLOUDXNS_API_KEY`, `CLOUDXNS_SECRET_KEY` | Not tested yet |
| [ConoHa](https://www.conoha.jp) | `conoha` | `CONOHA_TENANT_ID`, `CONOHA_API_USERNAME`, `CONOHA_API_PASSWORD` | YES |
| [DigitalOcean](https://www.digitalocean.com) | `digitalocean` | `DO_AUTH_TOKEN` | YES |
| [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` | Not tested yet |
| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet |
| [DNSPod](https://www.dnspod.com/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet |
| [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES |
| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | No |
| [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet |
| External Program | `exec` | `EXEC_PATH` | YES |
| [Exoscale](https://www.exoscale.com) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES |
| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet |
| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet |
| [Gandi v5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES |
| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet |
| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet |
| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES |
| [hosting.de](https://www.hosting.de) | `hostingde` | `HOSTINGDE_API_KEY`, `HOSTINGDE_ZONE_NAME` | Not tested yet |
| HTTP request | `httpreq` | `HTTPREQ_ENDPOINT`, `HTTPREQ_MODE`, `HTTPREQ_USERNAME`, `HTTPREQ_PASSWORD` (1) | YES |
| [IIJ](https://www.iij.ad.jp/) | `iij` | `IIJ_API_ACCESS_KEY`, `IIJ_API_SECRET_KEY`, `IIJ_DO_SERVICE_CODE` | Not tested yet |
| [INWX](https://www.inwx.de/en) | `inwx` | `INWX_USERNAME`, `INWX_PASSWORD` | YES |
| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet |
| [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet |
| [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet |
| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press <kbd>Enter</kbd>. | YES |
| [MyDNS.jp](https://www.mydns.jp/) | `mydnsjp` | `MYDNSJP_MASTER_ID`, `MYDNSJP_PASSWORD` | YES |
| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES |
| [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet |
| [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet |
| [NIFCloud](https://cloud.nifty.com/service/dns.htm) | `nifcloud` | `NIFCLOUD_ACCESS_KEY_ID`, `NIFCLOUD_SECRET_ACCESS_KEY` | Not tested yet |
| [Ns1](https://ns1.com/) | `ns1` | `NS1_API_KEY` | Not tested yet |
| [Open Telekom Cloud](https://cloud.telekom.de) | `otc` | `OTC_DOMAIN_NAME`, `OTC_USER_NAME`, `OTC_PASSWORD`, `OTC_PROJECT_NAME`, `OTC_IDENTITY_ENDPOINT` | Not tested yet |
| [OVH](https://www.ovh.com) | `ovh` | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` | YES |
| [PowerDNS](https://www.powerdns.com) | `pdns` | `PDNS_API_KEY`, `PDNS_API_URL` | Not tested yet |
| [Rackspace](https://www.rackspace.com/cloud/dns) | `rackspace` | `RACKSPACE_USER`, `RACKSPACE_API_KEY` | Not tested yet |
| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | `RFC2136_TSIG_KEY`, `RFC2136_TSIG_SECRET`, `RFC2136_TSIG_ALGORITHM`, `RFC2136_NAMESERVER` | Not tested yet |
| [Route 53](https://aws.amazon.com/route53/) | `route53` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `[AWS_REGION]`, `[AWS_HOSTED_ZONE_ID]` or a configured user/instance IAM profile. | YES |
| [Sakura Cloud](https://cloud.sakura.ad.jp/) | `sakuracloud` | `SAKURACLOUD_ACCESS_TOKEN`, `SAKURACLOUD_ACCESS_TOKEN_SECRET` | Not tested yet |
| [Selectel](https://selectel.ru/en/) | `selectel` | `SELECTEL_API_TOKEN` | YES |
| [Stackpath](https://www.stackpath.com/) | `stackpath` | `STACKPATH_CLIENT_ID`, `STACKPATH_CLIENT_SECRET`, `STACKPATH_STACK_ID` | Not tested yet |
| [TransIP](https://www.transip.nl/) | `transip` | `TRANSIP_ACCOUNT_NAME`, `TRANSIP_PRIVATE_KEY_PATH` | YES |
| [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet |
| [Vscale](https://vscale.io/) | `vscale` | `VSCALE_API_TOKEN` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet |

| Provider Name | Provider Code | Environment Variables | Wildcard & Root Domain Support |
|---|---|---|---|
| [ACME DNS](https://github.com/joohoi/acme-dns) | `acme-dns` | `ACME_DNS_API_BASE`, `ACME_DNS_STORAGE_PATH` | Not tested yet |
| [Alibaba Cloud](https://www.vultr.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet |
| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` | Not tested yet |
| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP`, `[AZURE_METADATA_ENDPOINT]` | Not tested yet |
| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | `BLUECAT_SERVER_URL`, `BLUECAT_USER_NAME`, `BLUECAT_PASSWORD`, `BLUECAT_CONFIG_NAME`, `BLUECAT_DNS_VIEW` | Not tested yet |
| [ClouDNS](https://www.cloudns.net/) | `cloudns` | `CLOUDNS_AUTH_ID`, `CLOUDNS_AUTH_PASSWORD` | YES |
| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | `CF_API_EMAIL`, `CF_API_KEY` - The `Global API Key` needs to be used, not the `Origin CA Key` | YES |
| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | `CLOUDXNS_API_KEY`, `CLOUDXNS_SECRET_KEY` | Not tested yet |
| [ConoHa](https://www.conoha.jp) | `conoha` | `CONOHA_TENANT_ID`, `CONOHA_API_USERNAME`, `CONOHA_API_PASSWORD` | YES |
| [DigitalOcean](https://www.digitalocean.com) | `digitalocean` | `DO_AUTH_TOKEN` | YES |
| [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` | YES |
| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet |
| [DNSPod](https://www.dnspod.com/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet |
| [Domain Offensive (do.de)](https://www.do.de/) | `dode` | `DODE_TOKEN` | YES |
| [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES |
| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | YES |
| [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet |
| External Program | `exec` | `EXEC_PATH` | YES |
| [Exoscale](https://www.exoscale.com) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES |
| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | YES |
| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet |
| [Gandi v5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES |
| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet |
| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet |
| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, Application Default Credentials (2) (3), [`GCE_SERVICE_ACCOUNT_FILE`] | YES |
| [hosting.de](https://www.hosting.de) | `hostingde` | `HOSTINGDE_API_KEY`, `HOSTINGDE_ZONE_NAME` | Not tested yet |
| HTTP request | `httpreq` | `HTTPREQ_ENDPOINT`, `HTTPREQ_MODE`, `HTTPREQ_USERNAME`, `HTTPREQ_PASSWORD` (1) | YES |
| [IIJ](https://www.iij.ad.jp/) | `iij` | `IIJ_API_ACCESS_KEY`, `IIJ_API_SECRET_KEY`, `IIJ_DO_SERVICE_CODE` | Not tested yet |
| [INWX](https://www.inwx.de/en) | `inwx` | `INWX_USERNAME`, `INWX_PASSWORD` | YES |
| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet |
| [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet |
| [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet |
| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press <kbd>Enter</kbd>. | YES |
| [MyDNS.jp](https://www.mydns.jp/) | `mydnsjp` | `MYDNSJP_MASTER_ID`, `MYDNSJP_PASSWORD` | YES |
| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES |
| [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet |
| [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet |
| [NIFCloud](https://cloud.nifty.com/service/dns.htm) | `nifcloud` | `NIFCLOUD_ACCESS_KEY_ID`, `NIFCLOUD_SECRET_ACCESS_KEY` | Not tested yet |
| [Ns1](https://ns1.com/) | `ns1` | `NS1_API_KEY` | Not tested yet |
| [Open Telekom Cloud](https://cloud.telekom.de) | `otc` | `OTC_DOMAIN_NAME`, `OTC_USER_NAME`, `OTC_PASSWORD`, `OTC_PROJECT_NAME`, `OTC_IDENTITY_ENDPOINT` | Not tested yet |
| [OVH](https://www.ovh.com) | `ovh` | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` | YES |
| [Openstack Designate](https://docs.openstack.org/designate) | `designate` | `OS_AUTH_URL`, `OS_USERNAME`, `OS_PASSWORD`, `OS_TENANT_NAME`, `OS_REGION_NAME` | YES |
| [Oracle Cloud](https://cloud.oracle.com/home) | `oraclecloud` | `OCI_COMPARTMENT_OCID`, `OCI_PRIVKEY_FILE`, `OCI_PRIVKEY_PASS`, `OCI_PUBKEY_FINGERPRINT`, `OCI_REGION`, `OCI_TENANCY_OCID`, `OCI_USER_OCID` | YES |
| [PowerDNS](https://www.powerdns.com) | `pdns` | `PDNS_API_KEY`, `PDNS_API_URL` | Not tested yet |
| [Rackspace](https://www.rackspace.com/cloud/dns) | `rackspace` | `RACKSPACE_USER`, `RACKSPACE_API_KEY` | Not tested yet |
| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | `RFC2136_TSIG_KEY`, `RFC2136_TSIG_SECRET`, `RFC2136_TSIG_ALGORITHM`, `RFC2136_NAMESERVER` | Not tested yet |
| [Route 53](https://aws.amazon.com/route53/) | `route53` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `[AWS_REGION]`, `[AWS_HOSTED_ZONE_ID]` or a configured user/instance IAM profile. | YES |
| [Sakura Cloud](https://cloud.sakura.ad.jp/) | `sakuracloud` | `SAKURACLOUD_ACCESS_TOKEN`, `SAKURACLOUD_ACCESS_TOKEN_SECRET` | Not tested yet |
| [Selectel](https://selectel.ru/en/) | `selectel` | `SELECTEL_API_TOKEN` | YES |
| [Stackpath](https://www.stackpath.com/) | `stackpath` | `STACKPATH_CLIENT_ID`, `STACKPATH_CLIENT_SECRET`, `STACKPATH_STACK_ID` | Not tested yet |
| [TransIP](https://www.transip.nl/) | `transip` | `TRANSIP_ACCOUNT_NAME`, `TRANSIP_PRIVATE_KEY_PATH` | YES |
| [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet |
| [Vscale](https://vscale.io/) | `vscale` | `VSCALE_API_TOKEN` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet |
| [Zone.ee](https://www.zone.ee) | `zoneee` | `ZONEEE_API_USER`, `ZONEEE_API_KEY` | YES |

- (1): more information about the HTTP message format can be found [here](https://github.com/xenolf/lego/blob/master/providers/dns/httpreq/readme.md)
- (1): more information about the HTTP message format can be found [here](https://go-acme.github.io/lego/dns/httpreq/)
- (2): https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application
- (3): https://github.com/golang/oauth2/blob/36a7019397c4c86cf59eeab3bc0d188bac444277/google/default.go#L61-L76
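
As an illustration of the `gcloud` entry above, an explicit service-account setup could look roughly like this (the project ID and key path are placeholders):

```shell
# DNS-01 challenge against Google Cloud DNS; values are only examples.
export GCE_PROJECT="my-gcp-project"
export GCE_SERVICE_ACCOUNT_FILE="/run/secrets/gcloud-dns-key.json"
./traefik --configFile=traefik.toml
```
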
#### `resolvers`

@@ -388,7 +398,7 @@ Due to ACME limitation it is not possible to define wildcards in SANs (alternati
Most likely the root domain should receive a certificate too, so it needs to be specified as SAN and 2 `DNS-01` challenges are executed.
In this case the generated DNS TXT record for both domains is the same.
Even though this behaviour is [DNS RFC](https://community.letsencrypt.org/t/wildcard-issuance-two-txt-records-for-the-same-name/54528/2) compliant, it can lead to problems as all DNS providers keep DNS records cached for a certain time (TTL) and this TTL can be greater than the challenge timeout, making the `DNS-01` challenge fail.
The Traefik ACME client library [LEGO](https://github.com/xenolf/lego) supports some but not all DNS providers to work around this issue.
The Traefik ACME client library [LEGO](https://github.com/go-acme/lego) supports some but not all DNS providers to work around this issue.
The [`provider` table](/configuration/acme/#provider) indicates if they allow generating certificates for a wildcard domain and its root domain.

### `onDemand` (Deprecated)

@@ -301,7 +301,7 @@ curl -s "http://localhost:8080/health" | jq .
// average response time in seconds
"average_response_time_sec": 0.8648016000000001,

// request statistics [requires --statistics to be set]
// request statistics [requires --api.statistics to be set]
// ten most recent requests with 4xx and 5xx status codes
"recent_errors": [
{
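
For example, once statistics are enabled, just the fields mentioned above can be pulled out of the health payload (endpoint and port are the defaults shown in the hunk header):

```shell
# Show only the statistics-related fields from the health endpoint.
curl -s "http://localhost:8080/health" | jq '{average_response_time_sec, recent_errors}'
```
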
@@ -213,9 +213,13 @@ More information about Docker's security:
- [A thread on Hacker News about sharing the `/var/run/docker.sock` file](https://news.ycombinator.com/item?id=17983623)
- [To Dind or not to DinD](https://blog.loof.fr/2018/01/to-dind-or-not-do-dind.html)

### Security Compensation
### Workarounds

The main security compensation is to expose the Docker socket over TCP, instead of the default Unix socket file.
!!! note "Improved Security"

    [TraefikEE](https://containo.us/traefikee) solves this problem by separating the control plane (connected to Docker) and the data plane (handling the requests).

Another possible workaround is to expose the Docker socket over TCP, instead of the default Unix socket file.
It allows different implementation levels of the [AAA (Authentication, Authorization, Accounting) concepts](https://en.wikipedia.org/wiki/AAA_(computer_security)), depending on your security assessment:

- Authentication with Client Certificates as described in [the "Protect the Docker daemon socket" page of Docker's documentation](https://docs.docker.com/engine/security/https/)

@@ -29,7 +29,7 @@ Traefik can be configured:

```shell
curl -XPUT @file "http://localhost:8080/api/providers/rest"
curl -XPUT -d @file "http://localhost:8080/api/providers/rest"
```

with `@file`:
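
To make the corrected `curl` concrete, a minimal sketch of such a file and the PUT request could look like this; the frontend/backend names, rule, and address are placeholders and the exact fields depend on your own dynamic configuration:

```shell
# Write a tiny dynamic configuration and push it to the rest provider.
cat > rules.json <<'EOF'
{
  "backends": {
    "backend1": {
      "servers": { "server1": { "url": "http://172.17.0.2:80" } }
    }
  },
  "frontends": {
    "frontend1": {
      "backend": "backend1",
      "routes": { "route1": { "rule": "Host:example.com" } }
    }
  }
}
EOF

curl -XPUT -d @rules.json "http://localhost:8080/api/providers/rest"
```
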
@@ -96,11 +96,12 @@ Labels, set through extensions or the property manager, can be used on services

| Label | Description |
|---|---|
| `traefik.enable=false` | Disable this container in Traefik |
| `traefik.enable=false` | Disable this container in Traefik |
| `traefik.backend.circuitbreaker.expression=EXPR` | Create a [circuit breaker](/basics/#backends) to be used against the backend |
| `traefik.servicefabric.groupname` | Group all services with the same name into a single backend in Traefik |
| `traefik.servicefabric.groupweight` | Set the weighting of the current services nodes in the backend group |
| `traefik.servicefabric.enablelabeloverrides` | Toggle whether labels can be overridden using the Service Fabric Property Manager API |
| `traefik.servicefabric.groupname` | Group all services with the same name into a single backend in Traefik |
| `traefik.servicefabric.groupweight` | Set the weighting of the current services nodes in the backend group |
| `traefik.servicefabric.enablelabeloverrides` | Toggle whether labels can be overridden using the Service Fabric Property Manager API |
| `traefik.servicefabric.endpointname` | Specify the name of the endpoint |
| `traefik.backend.healthcheck.path=/health` | Enable health check for the backend, hitting the container at `path`. |
| `traefik.backend.healthcheck.port=8080` | Allow to use a different port for the health check. |
| `traefik.backend.healthcheck.interval=1s` | Define the health check interval. |

@@ -93,7 +93,7 @@ For more information about the CLI, see the documentation about [Traefik command
Whitespace is used as option separator and `,` is used as value separator for the list.
The names of the options are case-insensitive.

In compose file the entrypoint syntax is different:
In a compose file, the entrypoint syntax is different. Notice how quotes are used:

```yaml
traefik:
@@ -20,7 +20,7 @@
# Buckets for latency metrics
#
# Optional
# Default: [0.1, 0.3, 1.2, 5]
# Default: [0.1, 0.3, 1.2, 5.0]
#
buckets = [0.1,0.3,1.2,5.0]
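
Once the Prometheus backend is enabled, the exported latency histograms can be checked on the metrics endpoint (shown here against the default API port; adjust to your setup):

```shell
# List the traefik_* metrics, including the request-duration histogram buckets.
curl -s http://localhost:8080/metrics | grep traefik_
```
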
@@ -58,6 +58,13 @@ Traefik supports three tracing backends: Jaeger, Zipkin and DataDog.
# Default: "127.0.0.1:6831"
#
localAgentHostPort = "127.0.0.1:6831"

# Trace Context Header Name is the http header name used to propagate tracing context.
# This must be in lower-case to avoid mismatches when decoding incoming headers.
#
# Default: "uber-trace-id"
#
traceContextHeaderName = "uber-trace-id"
```

!!! warning
@@ -156,4 +163,10 @@ Traefik supports three tracing backends: Jaeger, Zipkin and DataDog.
#
globalTag = ""

# Enable priority sampling. When using distributed tracing, this option must be enabled in order
# to get all the parts of a distributed trace sampled.
#
# Default: false
#
prioritySampling = false
```

@@ -66,7 +66,7 @@ _(But if you'd rather configure some of your routes manually, Traefik supports t

In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure.

To save some time, you can clone [Traefik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory.
To save some time, you can clone [Traefik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/v1.7/examples/quickstart/) directory.
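
For instance, grabbing those quickstart files locally (using the v1.7 branch referenced above) is just:

```shell
# Clone the v1.7 branch and switch to the quickstart examples.
git clone --branch v1.7 https://github.com/containous/traefik.git
cd traefik/examples/quickstart
```
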
### 1 — Launch Traefik — Tell It to Listen to Docker

@@ -190,7 +190,7 @@ You will learn fundamental Traefik features and see some demos with Kubernetes.

### The Official Binary File

You can grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and just run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):
You can grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and just run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/v1.7/traefik.sample.toml):

```shell
./traefik -c traefik.toml

@@ -14,7 +14,6 @@ defaultEntryPoints = ["https"]
[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.http]

[api]

@@ -4,7 +4,7 @@ This guide explains how to use Traefik as an Ingress controller for a Kubernetes

If you are not familiar with Ingresses in Kubernetes you might want to read the [Kubernetes user guide](https://kubernetes.io/docs/concepts/services-networking/ingress/)

The config files used in this guide can be found in the [examples directory](https://github.com/containous/traefik/tree/master/examples/k8s)
The config files used in this guide can be found in the [examples directory](https://github.com/containous/traefik/tree/v1.7/examples/k8s)

## Prerequisites

@@ -68,10 +68,10 @@ subjects:
namespace: kube-system
```

[examples/k8s/traefik-rbac.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/traefik-rbac.yaml)
[examples/k8s/traefik-rbac.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/traefik-rbac.yaml)

```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/traefik-rbac.yaml
```

For namespaced restrictions, one RoleBinding is required per watched namespace along with a corresponding configuration of Traefik's `kubernetes.namespaces` parameter.
@@ -148,7 +148,7 @@ spec:
type: NodePort
```

[examples/k8s/traefik-deployment.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/traefik-deployment.yaml)
[examples/k8s/traefik-deployment.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/traefik-deployment.yaml)

!!! note
    The Service will expose two NodePorts which allow access to the ingress and the web interface.
@@ -216,7 +216,7 @@ spec:
name: admin
```

[examples/k8s/traefik-ds.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/traefik-ds.yaml)
[examples/k8s/traefik-ds.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/traefik-ds.yaml)

!!! note
    This will create a Daemonset that uses privileged ports 80/8080 on the host. This may not work on all providers, but illustrates the static (non-NodePort) hostPort binding. The `traefik-ingress-service` can still be used inside the cluster to access the DaemonSet pods.
@@ -224,11 +224,11 @@ spec:
To deploy Traefik to your cluster, start by submitting one of the YAML files to the cluster with `kubectl`:

```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-deployment.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/traefik-deployment.yaml
```

```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-ds.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/traefik-ds.yaml
```

There are some significant differences between using Deployments and DaemonSets:
@@ -352,10 +352,10 @@ spec:
|
||||
servicePort: web
|
||||
```
|
||||
|
||||
[examples/k8s/ui.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/ui.yaml)
|
||||
[examples/k8s/ui.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/ui.yaml)
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/ui.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/ui.yaml
|
||||
```
|
||||
|
||||
Now lets setup an entry in our `/etc/hosts` file to route `traefik-ui.minikube` to our cluster.
|
||||
@@ -581,10 +581,10 @@ spec:
|
||||
- containerPort: 80
|
||||
```
|
||||
|
||||
[examples/k8s/cheese-deployments.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-deployments.yaml)
|
||||
[examples/k8s/cheese-deployments.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/cheese-deployments.yaml)
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-deployments.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/cheese-deployments.yaml
|
||||
```
|
||||
|
||||
Next we need to setup a Service for each of the cheese pods.
|
||||
@@ -636,10 +636,10 @@ spec:
|
||||
!!! note
|
||||
We also set a [circuit breaker expression](/basics/#backends) for one of the backends by setting the `traefik.backend.circuitbreaker` annotation on the service.
|
||||
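A hypothetical Service manifest carrying that annotation is sketched below; the expression shown is only an example of the kind of rule that can be set, not necessarily the one used in `cheese-services.yaml`, and the service name and selector are assumptions:

```yaml
# Hypothetical Service with a circuit breaker annotation; all values are illustrative.
apiVersion: v1
kind: Service
metadata:
  name: wensleydale
  annotations:
    traefik.backend.circuitbreaker: "NetworkErrorRatio() > 0.5"
spec:
  ports:
    - name: http
      port: 80
  selector:
    task: wensleydale
```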
[examples/k8s/cheese-services.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-services.yaml)
[examples/k8s/cheese-services.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/cheese-services.yaml)

```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-services.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/cheese-services.yaml
```

Now we can submit an ingress for the cheese websites.

@@ -676,13 +676,13 @@ spec:
servicePort: http
```

[examples/k8s/cheese-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheese-ingress.yaml)
[examples/k8s/cheese-ingress.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/cheese-ingress.yaml)

!!! note
    we list each hostname, and add a backend service.
    We list each hostname, and add a backend service.

```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/cheese-ingress.yaml
```

Now visit the [Traefik dashboard](http://traefik-ui.minikube/) and you should see a frontend for each host.

@@ -731,13 +731,13 @@ spec:
servicePort: http
```

[examples/k8s/cheeses-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheeses-ingress.yaml)
[examples/k8s/cheeses-ingress.yaml](https://github.com/containous/traefik/tree/v1.7/examples/k8s/cheeses-ingress.yaml)

!!! note
    We are configuring Traefik to strip the prefix from the url path with the `traefik.frontend.rule.type` annotation so that we can use the containers from the previous example without modification.
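A sketch of the ingress metadata for that setup; `PathPrefixStrip` is the rule type that strips the matched prefix before forwarding, while the host, path, and service names here follow the cheese example and are assumptions about the exact manifest:

```yaml
# Sketch of a prefix-stripping ingress; host, path, and backend names are assumed.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheeses
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.frontend.rule.type: PathPrefixStrip
spec:
  rules:
    - host: cheeses.minikube
      http:
        paths:
          - path: /stilton
            backend:
              serviceName: stilton
              servicePort: http
```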
```shell
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheeses-ingress.yaml
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/cheeses-ingress.yaml
```

@@ -783,11 +783,11 @@ Traefik will now look for cheddar service endpoints (ports on healthy pods) in b

Deploying cheddar into the cheese namespace and afterwards shutting down cheddar in the default namespace is enough to migrate the traffic.

!!! note
    The kubernetes documentation does not specify this merging behavior.
    The kubernetes documentation does not specify this merging behavior.

!!! note
    Merging ingress definitions can cause problems if the annotations differ or if the services handle requests differently.
    Be careful and extra cautious when running multiple overlapping ingress definitions.
    Merging ingress definitions can cause problems if the annotations differ or if the services handle requests differently.
    Be careful and extra cautious when running multiple overlapping ingress definitions.

## Specifying Routing Priorities
@@ -22,6 +22,12 @@ rules:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
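Read back with its YAML indentation (the indentation here is assumed, since it is not preserved above), the newly added rule grants Traefik permission to update ingress status:

```yaml
# The added RBAC rule, re-indented; nesting is assumed from standard rule layout.
- apiGroups:
    - extensions
  resources:
    - ingresses/status
  verbs:
    - update
```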
@@ -2,7 +2,7 @@

In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure.

To save some time, you can clone [Traefik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory.
To save some time, you can clone [Traefik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/v1.7/examples/quickstart/) directory.

### 1 — Launch Traefik — Tell It to Listen to Docker
42  exp.Dockerfile  Normal file
@@ -0,0 +1,42 @@
# WEBUI
FROM node:8.15.0 as webui

ENV WEBUI_DIR /src/webui
RUN mkdir -p $WEBUI_DIR

COPY ./webui/ $WEBUI_DIR/

WORKDIR $WEBUI_DIR
RUN yarn install

RUN npm run build

# BUILD
FROM golang:1.11-alpine as gobuild

RUN apk --update upgrade \
&& apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar \
&& rm -rf /var/cache/apk/*

RUN mkdir -p /usr/local/bin \
&& curl -fsSL -o /usr/local/bin/go-bindata https://github.com/containous/go-bindata/releases/download/v1.0.0/go-bindata \
&& chmod +x /usr/local/bin/go-bindata

WORKDIR /go/src/github.com/containous/traefik
COPY . /go/src/github.com/containous/traefik

RUN rm -rf /go/src/github.com/containous/traefik/static/
COPY --from=webui /src/static/ /go/src/github.com/containous/traefik/static/

RUN ./script/make.sh generate binary

## IMAGE
FROM scratch

COPY script/ca-certificates.crt /etc/ssl/certs/
COPY --from=gobuild /go/src/github.com/containous/traefik/dist/traefik /

EXPOSE 80
VOLUME ["/tmp"]

ENTRYPOINT ["/traefik"]
@@ -10,7 +10,7 @@ import (

"github.com/abronan/valkeyrie"
"github.com/abronan/valkeyrie/store"
"github.com/abronan/valkeyrie/store/etcd/v3"
etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3"
"github.com/containous/traefik/integration/try"
"github.com/go-check/check"
31  integration/fixtures/grpc/config_retry.toml  Normal file
@@ -0,0 +1,31 @@
defaultEntryPoints = ["https"]

rootCAs = [ """{{ .CertContent }}""" ]

[retry]

[entryPoints]
[entryPoints.https]
address = ":4443"
[entryPoints.https.tls]
[[entryPoints.https.tls.certificates]]
certFile = """{{ .CertContent }}"""
keyFile = """{{ .KeyContent }}"""


[api]

[file]

[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "https://127.0.0.1:{{ .GRPCServerPort }}"
weight = 1


[frontends]
[frontends.frontend1]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "Host:127.0.0.1"
@@ -417,3 +417,45 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
    })
    c.Assert(err, check.IsNil)
}

func (s *GRPCSuite) TestGRPCWithRetry(c *check.C) {
    lis, err := net.Listen("tcp", ":0")
    _, port, err := net.SplitHostPort(lis.Addr().String())
    c.Assert(err, check.IsNil)

    go func() {
        err := startGRPCServer(lis, &myserver{})
        c.Log(err)
        c.Assert(err, check.IsNil)
    }()

    file := s.adaptFile(c, "fixtures/grpc/config_retry.toml", struct {
        CertContent    string
        KeyContent     string
        GRPCServerPort string
    }{
        CertContent:    string(LocalhostCert),
        KeyContent:     string(LocalhostKey),
        GRPCServerPort: port,
    })

    defer os.Remove(file)
    cmd, display := s.traefikCmd(withConfigFile(file))
    defer display(c)

    err = cmd.Start()
    c.Assert(err, check.IsNil)
    defer cmd.Process.Kill()

    // wait for Traefik
    err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1"))
    c.Assert(err, check.IsNil)

    var response string
    err = try.Do(1*time.Second, func() error {
        response, err = callHelloClientGRPC("World", true)
        return err
    })
    c.Assert(err, check.IsNil)
    c.Assert(response, check.Equals, "Hello World")
}
@@ -1,5 +1,8 @@
consul:
  image: consul
  # use v1.4.0 because https://github.com/hashicorp/consul/issues/5270
  # v1.4.1 cannot be used.
  # waiting for v1.4.2
  image: consul:1.4.0
  command: agent -server -bootstrap-expect 1 -client 0.0.0.0 -log-level debug -ui
  ports:
    - "8400:8400"
@@ -1,5 +1,5 @@
pebble:
  image: letsencrypt/pebble:2018-11-02
  image: letsencrypt/pebble:v2.0.1
  command: pebble --dnsserver ${DOCKER_HOST_IP}:5053
  ports:
    - 14000:14000
@@ -1,6 +1,7 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
@@ -38,6 +39,10 @@ func (s *TimeoutSuite) TestForwardingTimeouts(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
c.Assert(response.StatusCode, checker.Equals, http.StatusGatewayTimeout)
|
||||
|
||||
// Check that timeout service is available
|
||||
statusURL := fmt.Sprintf("http://%s:9000/statusTest?status=200", httpTimeoutEndpoint)
|
||||
c.Assert(try.GetRequest(statusURL, 60*time.Second, try.StatusCodeIs(http.StatusOK)), checker.IsNil)
|
||||
|
||||
// This simulates a ResponseHeaderTimeout.
|
||||
response, err = http.Get("http://127.0.0.1:8000/responseHeaderTimeout?sleep=1000")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
@@ -44,8 +44,9 @@ func TestLogRotation(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting up temporary directory: %s", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
fileName := tempDir + "traefik.log"
|
||||
fileName := filepath.Join(tempDir, "traefik.log")
|
||||
rotatedFileName := fileName + ".rotated"
|
||||
|
||||
config := &types.AccessLog{FilePath: fileName, Format: CommonFormat}
|
||||
@@ -587,6 +588,7 @@ func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) {
|
||||
|
||||
restoreStdout = func() {
|
||||
os.Stdout = original
|
||||
os.RemoveAll(file.Name())
|
||||
}
|
||||
|
||||
return file, restoreStdout
|
||||
|
||||
@@ -110,6 +110,7 @@ type retryResponseWriterWithoutCloseNotify struct {
|
||||
responseWriter http.ResponseWriter
|
||||
headers http.Header
|
||||
shouldRetry bool
|
||||
written bool
|
||||
}
|
||||
|
||||
func (rr *retryResponseWriterWithoutCloseNotify) ShouldRetry() bool {
|
||||
@@ -121,6 +122,9 @@ func (rr *retryResponseWriterWithoutCloseNotify) DisableRetries() {
|
||||
}
|
||||
|
||||
func (rr *retryResponseWriterWithoutCloseNotify) Header() http.Header {
|
||||
if rr.written {
|
||||
return rr.responseWriter.Header()
|
||||
}
|
||||
return rr.headers
|
||||
}
|
||||
|
||||
@@ -155,6 +159,7 @@ func (rr *retryResponseWriterWithoutCloseNotify) WriteHeader(code int) {
|
||||
}
|
||||
|
||||
rr.responseWriter.WriteHeader(code)
|
||||
rr.written = true
|
||||
}
|
||||
|
||||
func (rr *retryResponseWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
|
||||
@@ -15,9 +15,14 @@ const Name = "datadog"
|
||||
|
||||
// Config provides configuration settings for a datadog tracer
|
||||
type Config struct {
|
||||
LocalAgentHostPort string `description:"Set datadog-agent's host:port that the reporter will used. Defaults to localhost:8126" export:"false"`
|
||||
GlobalTag string `description:"Key:Value tag to be set on all the spans." export:"true"`
|
||||
Debug bool `description:"Enable DataDog debug." export:"true"`
|
||||
LocalAgentHostPort string `description:"Set datadog-agent's host:port that the reporter will used. Defaults to localhost:8126" export:"false"`
|
||||
GlobalTag string `description:"Key:Value tag to be set on all the spans." export:"true"`
|
||||
Debug bool `description:"Enable DataDog debug." export:"true"`
|
||||
PrioritySampling bool `description:"Enable priority sampling. When using distributed tracing, this option must be enabled in order to get all the parts of a distributed trace sampled."`
|
||||
TraceIDHeaderName string `description:"Specifies the header name that will be used to store the trace ID." export:"true"`
|
||||
ParentIDHeaderName string `description:"Specifies the header name that will be used to store the parent ID." export:"true"`
|
||||
SamplingPriorityHeaderName string `description:"Specifies the header name that will be used to store the sampling priority." export:"true"`
|
||||
BagagePrefixHeaderName string `description:"specifies the header name prefix that will be used to store baggage items in a map." export:"true"`
|
||||
}
|
||||
|
||||
// Setup sets up the tracer
|
||||
@@ -29,12 +34,22 @@ func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error
|
||||
value = tag[1]
|
||||
}
|
||||
|
||||
tracer := ddtracer.New(
|
||||
opts := []datadog.StartOption{
|
||||
datadog.WithAgentAddr(c.LocalAgentHostPort),
|
||||
datadog.WithServiceName(serviceName),
|
||||
datadog.WithGlobalTag(tag[0], value),
|
||||
datadog.WithDebugMode(c.Debug),
|
||||
)
|
||||
datadog.WithPropagator(datadog.NewPropagator(&datadog.PropagatorConfig{
|
||||
TraceHeader: c.TraceIDHeaderName,
|
||||
ParentHeader: c.ParentIDHeaderName,
|
||||
PriorityHeader: c.SamplingPriorityHeaderName,
|
||||
BaggagePrefix: c.BagagePrefixHeaderName,
|
||||
})),
|
||||
}
|
||||
if c.PrioritySampling {
|
||||
opts = append(opts, datadog.WithPrioritySampling())
|
||||
}
|
||||
tracer := ddtracer.New(opts...)
|
||||
|
||||
// Without this, child spans are getting the NOOP tracer
|
||||
opentracing.SetGlobalTracer(tracer)
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/opentracing/opentracing-go"
|
||||
"github.com/uber/jaeger-client-go"
|
||||
jaegercfg "github.com/uber/jaeger-client-go/config"
|
||||
jaegermet "github.com/uber/jaeger-lib/metrics"
|
||||
)
|
||||
@@ -14,10 +15,11 @@ const Name = "jaeger"
|
||||
|
||||
// Config provides configuration settings for a jaeger tracer
|
||||
type Config struct {
|
||||
SamplingServerURL string `description:"set the sampling server url." export:"false"`
|
||||
SamplingType string `description:"set the sampling type." export:"true"`
|
||||
SamplingParam float64 `description:"set the sampling parameter." export:"true"`
|
||||
LocalAgentHostPort string `description:"set jaeger-agent's host:port that the reporter will used." export:"false"`
|
||||
SamplingServerURL string `description:"set the sampling server url." export:"false"`
|
||||
SamplingType string `description:"set the sampling type." export:"true"`
|
||||
SamplingParam float64 `description:"set the sampling parameter." export:"true"`
|
||||
LocalAgentHostPort string `description:"set jaeger-agent's host:port that the reporter will used." export:"false"`
|
||||
TraceContextHeaderName string `description:"set the header to use for the trace-id." export:"true"`
|
||||
}
|
||||
|
||||
// Setup sets up the tracer
|
||||
@@ -32,6 +34,9 @@ func (c *Config) Setup(componentName string) (opentracing.Tracer, io.Closer, err
|
||||
LogSpans: true,
|
||||
LocalAgentHostPort: c.LocalAgentHostPort,
|
||||
},
|
||||
Headers: &jaeger.HeadersConfig{
|
||||
TraceContextHeaderName: c.TraceContextHeaderName,
|
||||
},
|
||||
}
|
||||
|
||||
jMetricsFactory := jaegermet.NullFactory
|
||||
|
||||
@@ -6,6 +6,7 @@ dev_addr: 0.0.0.0:8000

repo_name: 'GitHub'
repo_url: 'https://github.com/containous/traefik'
edit_uri: 'edit/v1.7/docs/'

docs_dir: 'docs'

@@ -25,7 +26,7 @@ theme:
  prev: 'Previous'
  next: 'Next'

copyright: "Copyright © 2016-2018 Containous SAS"
copyright: "Copyright © 2016-2019 Containous"

google_analytics:
  - 'UA-51880359-3'
@@ -7,8 +7,8 @@ import (
|
||||
"crypto/x509"
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/xenolf/lego/certcrypto"
|
||||
"github.com/xenolf/lego/registration"
|
||||
"github.com/go-acme/lego/certcrypto"
|
||||
"github.com/go-acme/lego/registration"
|
||||
)
|
||||
|
||||
// Account is used to store lets encrypt registration info
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"github.com/containous/mux"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/xenolf/lego/challenge"
|
||||
"github.com/xenolf/lego/challenge/http01"
|
||||
"github.com/go-acme/lego/challenge"
|
||||
"github.com/go-acme/lego/challenge/http01"
|
||||
)
|
||||
|
||||
var _ challenge.ProviderTimeout = (*challengeHTTP)(nil)
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/xenolf/lego/challenge"
|
||||
"github.com/xenolf/lego/challenge/tlsalpn01"
|
||||
"github.com/go-acme/lego/challenge"
|
||||
"github.com/go-acme/lego/challenge/tlsalpn01"
|
||||
)
|
||||
|
||||
var _ challenge.Provider = (*challengeTLSALPN)(nil)
|
||||
|
||||
@@ -20,19 +20,19 @@ import (
|
||||
traefiktls "github.com/containous/traefik/tls"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/containous/traefik/version"
|
||||
"github.com/go-acme/lego/certificate"
|
||||
"github.com/go-acme/lego/challenge"
|
||||
"github.com/go-acme/lego/challenge/dns01"
|
||||
"github.com/go-acme/lego/lego"
|
||||
legolog "github.com/go-acme/lego/log"
|
||||
"github.com/go-acme/lego/providers/dns"
|
||||
"github.com/go-acme/lego/registration"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/xenolf/lego/certificate"
|
||||
"github.com/xenolf/lego/challenge"
|
||||
"github.com/xenolf/lego/challenge/dns01"
|
||||
"github.com/xenolf/lego/lego"
|
||||
legolog "github.com/xenolf/lego/log"
|
||||
"github.com/xenolf/lego/providers/dns"
|
||||
"github.com/xenolf/lego/registration"
|
||||
)
|
||||
|
||||
var (
|
||||
// OSCPMustStaple enables OSCP stapling as from https://github.com/xenolf/lego/issues/270
|
||||
// OSCPMustStaple enables OSCP stapling as from https://github.com/go-acme/lego/issues/270
|
||||
OSCPMustStaple = false
|
||||
)
|
||||
|
||||
@@ -232,7 +232,7 @@ func (p *Provider) getClient() (*lego.Client, error) {
|
||||
|
||||
config := lego.NewConfig(account)
|
||||
config.CADirURL = caServer
|
||||
config.KeyType = account.KeyType
|
||||
config.Certificate.KeyType = account.KeyType
|
||||
config.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version)
|
||||
|
||||
client, err := lego.NewClient(config)
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"github.com/containous/traefik/safe"
|
||||
traefiktls "github.com/containous/traefik/tls"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/go-acme/lego/certcrypto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/xenolf/lego/certcrypto"
|
||||
)
|
||||
|
||||
func TestGetUncheckedCertificates(t *testing.T) {
|
||||
|
||||
@@ -19,9 +19,11 @@ import (
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/patrickmn/go-cache"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
var existingTaskDefCache = cache.New(30*time.Minute, 5*time.Minute)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
@@ -291,7 +293,7 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||
}
|
||||
|
||||
var mach *machine
|
||||
if aws.StringValue(task.LaunchType) == ecs.LaunchTypeFargate {
|
||||
if len(task.Attachments) != 0 {
|
||||
var ports []portMapping
|
||||
for _, mapping := range containerDefinition.PortMappings {
|
||||
if mapping != nil {
|
||||
@@ -400,16 +402,22 @@ func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, cl
|
||||
func (p *Provider) lookupTaskDefinitions(ctx context.Context, client *awsClient, taskDefArns map[string]*ecs.Task) (map[string]*ecs.TaskDefinition, error) {
|
||||
taskDef := make(map[string]*ecs.TaskDefinition)
|
||||
for arn, task := range taskDefArns {
|
||||
resp, err := client.ecs.DescribeTaskDefinitionWithContext(ctx, &ecs.DescribeTaskDefinitionInput{
|
||||
TaskDefinition: task.TaskDefinitionArn,
|
||||
})
|
||||
if definition, ok := existingTaskDefCache.Get(arn); ok {
|
||||
taskDef[arn] = definition.(*ecs.TaskDefinition)
|
||||
log.Debugf("Found cached task definition for %s. Skipping the call", arn)
|
||||
} else {
|
||||
resp, err := client.ecs.DescribeTaskDefinitionWithContext(ctx, &ecs.DescribeTaskDefinitionInput{
|
||||
TaskDefinition: task.TaskDefinitionArn,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe task definition: %s", err)
|
||||
return nil, err
|
||||
if err != nil {
|
||||
log.Errorf("Unable to describe task definition: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
taskDef[arn] = resp.TaskDefinition
|
||||
existingTaskDefCache.Set(arn, resp.TaskDefinition, cache.DefaultExpiration)
|
||||
}
|
||||
|
||||
taskDef[arn] = resp.TaskDefinition
|
||||
}
|
||||
return taskDef, nil
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/abronan/valkeyrie/store"
|
||||
"github.com/abronan/valkeyrie/store/etcd/v2"
|
||||
"github.com/abronan/valkeyrie/store/etcd/v3"
|
||||
etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3"
|
||||
"github.com/containous/traefik/log"
|
||||
"github.com/containous/traefik/provider"
|
||||
"github.com/containous/traefik/provider/kv"
|
||||
|
||||
@@ -175,17 +175,36 @@ func (p *Provider) loadFileConfig(filename string, parseTemplate bool) (*types.C
|
||||
} else {
|
||||
configuration, err = p.DecodeConfiguration(fileContent)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var tlsConfigs []*tls.Configuration
|
||||
for _, conf := range configuration.TLS {
|
||||
bytes, err := conf.Certificate.CertFile.Read()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
conf.Certificate.CertFile = tls.FileOrContent(string(bytes))
|
||||
|
||||
bytes, err = conf.Certificate.KeyFile.Read()
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
continue
|
||||
}
|
||||
conf.Certificate.KeyFile = tls.FileOrContent(string(bytes))
|
||||
tlsConfigs = append(tlsConfigs, conf)
|
||||
}
|
||||
configuration.TLS = tlsConfigs
|
||||
|
||||
if configuration == nil || configuration.Backends == nil && configuration.Frontends == nil && configuration.TLS == nil {
|
||||
configuration = &types.Configuration{
|
||||
Frontends: make(map[string]*types.Frontend),
|
||||
Backends: make(map[string]*types.Backend),
|
||||
}
|
||||
}
|
||||
return configuration, err
|
||||
return configuration, nil
|
||||
}
|
||||
|
||||
func (p *Provider) loadFileConfigFromDirectory(directory string, configuration *types.Configuration) (*types.Configuration, error) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// createRandomFile Helper
|
||||
@@ -329,6 +330,27 @@ func createProvider(t *testing.T, test ProvideTestCase, watch bool) (*Provider,
|
||||
}
|
||||
|
||||
return provider, func() {
|
||||
os.Remove(tempDir)
|
||||
os.RemoveAll(tempDir)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTLSContent(t *testing.T) {
|
||||
tempDir := createTempDir(t, "testdir")
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
fileTLS := createRandomFile(t, tempDir, "CONTENT")
|
||||
fileConfig := createRandomFile(t, tempDir, `
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tls.certificate]
|
||||
certFile = "`+fileTLS.Name()+`"
|
||||
keyFile = "`+fileTLS.Name()+`"
|
||||
`)
|
||||
|
||||
provider := &Provider{}
|
||||
configuration, err := provider.loadFileConfig(fileConfig.Name(), true)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "CONTENT", configuration.TLS[0].Certificate.CertFile.String())
|
||||
require.Equal(t, "CONTENT", configuration.TLS[0].Certificate.KeyFile.String())
|
||||
}
|
||||
|
||||
@@ -245,6 +245,11 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
|
||||
baseName = pa.Backend.ServiceName
|
||||
}
|
||||
|
||||
entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints)
|
||||
if len(entryPoints) > 0 {
|
||||
baseName = strings.Join(entryPoints, "-") + "_" + baseName
|
||||
}
|
||||
|
||||
if priority > 0 {
|
||||
baseName = strconv.Itoa(priority) + "-" + baseName
|
||||
}
|
||||
@@ -277,7 +282,6 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
|
||||
|
||||
passHostHeader := getBoolValue(i.Annotations, annotationKubernetesPreserveHost, !p.DisablePassHostHeaders)
|
||||
passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) // Deprecated
|
||||
entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints)
|
||||
|
||||
frontend = &types.Frontend{
|
||||
Backend: baseName,
|
||||
@@ -502,40 +506,65 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem
|
||||
templateObjects.Backends[defaultBackendName].Buffering = getBuffering(service)
|
||||
templateObjects.Backends[defaultBackendName].ResponseForwarding = getResponseForwarding(service)
|
||||
|
||||
endpoints, exists, err := cl.GetEndpoints(service.Namespace, service.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving endpoint information from k8s API %s/%s: %v", service.Namespace, service.Name, err)
|
||||
}
|
||||
if !exists {
|
||||
return fmt.Errorf("endpoints not found for %s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
if len(endpoints.Subsets) == 0 {
|
||||
return fmt.Errorf("endpoints not available for %s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
for _, port := range service.Spec.Ports {
|
||||
|
||||
for _, subset := range endpoints.Subsets {
|
||||
endpointPort := endpointPortNumber(corev1.ServicePort{Protocol: "TCP", Port: int32(i.Spec.Backend.ServicePort.IntValue())}, subset.Ports)
|
||||
if endpointPort == 0 {
|
||||
// endpoint port does not match service.
|
||||
continue
|
||||
}
|
||||
// We have to treat external-name service differently here b/c it doesn't have any endpoints
|
||||
if service.Spec.Type == corev1.ServiceTypeExternalName {
|
||||
|
||||
protocol := "http"
|
||||
for _, address := range subset.Addresses {
|
||||
if endpointPort == 443 || strings.HasPrefix(i.Spec.Backend.ServicePort.String(), "https") {
|
||||
protocol := "http"
|
||||
if port.Port == 443 || strings.HasPrefix(port.Name, "https") {
|
||||
protocol = "https"
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(address.IP, strconv.FormatInt(int64(endpointPort), 10)))
|
||||
name := url
|
||||
if address.TargetRef != nil && address.TargetRef.Name != "" {
|
||||
name = address.TargetRef.Name
|
||||
url := protocol + "://" + service.Spec.ExternalName
|
||||
if port.Port != 443 && port.Port != 80 {
|
||||
url = fmt.Sprintf("%s:%d", url, port.Port)
|
||||
}
|
||||
|
||||
templateObjects.Backends[defaultBackendName].Servers[name] = types.Server{
|
||||
templateObjects.Backends[defaultBackendName].Servers[url] = types.Server{
|
||||
URL: url,
|
||||
Weight: label.DefaultWeight,
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
endpoints, exists, err := cl.GetEndpoints(service.Namespace, service.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error retrieving endpoint information from k8s API %s/%s: %v", service.Namespace, service.Name, err)
|
||||
}
|
||||
if !exists {
|
||||
return fmt.Errorf("endpoints not found for %s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
if len(endpoints.Subsets) == 0 {
|
||||
return fmt.Errorf("endpoints not available for %s/%s", service.Namespace, service.Name)
|
||||
}
|
||||
|
||||
for _, subset := range endpoints.Subsets {
|
||||
|
||||
endpointPort := endpointPortNumber(port, subset.Ports)
|
||||
if endpointPort == 0 {
|
||||
// endpoint port does not match service.
|
||||
continue
|
||||
}
|
||||
|
||||
protocol := "http"
|
||||
for _, address := range subset.Addresses {
|
||||
if endpointPort == 443 || strings.HasPrefix(i.Spec.Backend.ServicePort.String(), "https") {
|
||||
protocol = "https"
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(address.IP, strconv.FormatInt(int64(endpointPort), 10)))
|
||||
name := url
|
||||
if address.TargetRef != nil && address.TargetRef.Name != "" {
|
||||
name = address.TargetRef.Name
|
||||
}
|
||||
|
||||
templateObjects.Backends[defaultBackendName].Servers[name] = types.Server{
|
||||
URL: url,
|
||||
Weight: label.DefaultWeight,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -945,8 +974,12 @@ func getFrontendRedirect(i *extensionsv1beta1.Ingress, baseName, path string) *t
|
||||
permanent := getBoolValue(i.Annotations, annotationKubernetesRedirectPermanent, false)
|
||||
|
||||
if appRoot := getStringValue(i.Annotations, annotationKubernetesAppRoot, ""); appRoot != "" && (path == "/" || path == "") {
|
||||
regex := fmt.Sprintf("%s$", baseName)
|
||||
if path == "" {
|
||||
regex = fmt.Sprintf("%s/$", baseName)
|
||||
}
|
||||
return &types.Redirect{
|
||||
Regex: fmt.Sprintf("%s$", baseName),
|
||||
Regex: regex,
|
||||
Replacement: fmt.Sprintf("%s/%s", strings.TrimRight(baseName, "/"), strings.TrimLeft(appRoot, "/")),
|
||||
Permanent: permanent,
|
||||
}
|
||||
|
||||
@@ -284,6 +284,59 @@ func TestLoadIngresses(t *testing.T) {
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestLoadGlobalIngressWithExternalName(t *testing.T) {
|
||||
ingresses := []*extensionsv1beta1.Ingress{
|
||||
buildIngress(
|
||||
iNamespace("testing"),
|
||||
iSpecBackends(iSpecBackend(iIngressBackend("service1", intstr.FromInt(80)))),
|
||||
),
|
||||
}
|
||||
|
||||
services := []*corev1.Service{
|
||||
buildService(
|
||||
sName("service1"),
|
||||
sNamespace("testing"),
|
||||
sUID("1"),
|
||||
sSpec(
|
||||
sType("ExternalName"),
|
||||
sExternalName("some-external-name"),
|
||||
sPorts(sPort(80, ""))),
|
||||
),
|
||||
}
|
||||
|
||||
watchChan := make(chan interface{})
|
||||
client := clientMock{
|
||||
ingresses: ingresses,
|
||||
services: services,
|
||||
watchChan: watchChan,
|
||||
}
|
||||
provider := Provider{}
|
||||
|
||||
actual, err := provider.loadIngresses(client)
|
||||
require.NoError(t, err, "error loading ingresses")
|
||||
|
||||
expected := buildConfiguration(
|
||||
backends(
|
||||
backend("global-default-backend",
|
||||
lbMethod("wrr"),
|
||||
servers(
|
||||
server("http://some-external-name", weight(1)),
|
||||
),
|
||||
),
|
||||
),
|
||||
frontends(
|
||||
frontend("global-default-backend",
|
||||
frontendName("global-default-frontend"),
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("/", "PathPrefix:/"),
|
||||
),
|
||||
),
|
||||
),
|
||||
)
|
||||
assert.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestLoadGlobalIngressWithPortNumbers(t *testing.T) {
|
||||
ingresses := []*extensionsv1beta1.Ingress{
|
||||
buildIngress(
|
||||
@@ -374,7 +427,7 @@ func TestLoadGlobalIngressWithHttpsPortNames(t *testing.T) {
|
||||
eUID("1"),
|
||||
subset(
|
||||
eAddresses(eAddress("10.10.0.1")),
|
||||
ePorts(ePort(8080, ""))),
|
||||
ePorts(ePort(8080, "https-global"))),
|
||||
),
|
||||
}
|
||||
|
||||
@@ -1456,7 +1509,7 @@ rateset:
|
||||
server("http://example.com", weight(1))),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("other/",
|
||||
backend("http-https_other/",
|
||||
servers(
|
||||
server("http://example.com", weight(1)),
|
||||
server("http://example.com", weight(1))),
|
||||
@@ -1564,7 +1617,7 @@ rateset:
|
||||
route("/stuff", "PathPrefix:/stuff"),
|
||||
route("other", "Host:other")),
|
||||
),
|
||||
frontend("other/",
|
||||
frontend("http-https_other/",
|
||||
passHostHeader(),
|
||||
entryPoints("http", "https"),
|
||||
routes(
|
||||
@@ -1686,7 +1739,7 @@ rateset:
|
||||
),
|
||||
frontend("root3",
|
||||
passHostHeader(),
|
||||
redirectRegex("root3$", "root3/root"),
|
||||
redirectRegex("root3/$", "root3/root"),
|
||||
routes(
|
||||
route("root3", "Host:root3"),
|
||||
),
|
||||
@@ -2783,24 +2836,24 @@ func TestTLSSecretLoad(t *testing.T) {
|
||||
|
||||
expected := buildConfiguration(
|
||||
backends(
|
||||
backend("example.com",
|
||||
backend("ep1-ep2_example.com",
|
||||
servers(),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("example.org",
|
||||
backend("ep1-ep2_example.org",
|
||||
servers(),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
),
|
||||
frontends(
|
||||
frontend("example.com",
|
||||
frontend("ep1-ep2_example.com",
|
||||
entryPoints("ep1", "ep2"),
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("example.com", "Host:example.com"),
|
||||
),
|
||||
),
|
||||
frontend("example.org",
|
||||
frontend("ep1-ep2_example.org",
|
||||
entryPoints("ep1", "ep2"),
|
||||
passHostHeader(),
|
||||
routes(
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
|
||||
@@ -71,9 +71,18 @@ func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
var stacks = listRancherStacks(rancherClient)
|
||||
var services = listRancherServices(rancherClient)
|
||||
var container = listRancherContainer(rancherClient)
|
||||
stacks, err := listRancherStacks(rancherClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
services, err := listRancherServices(rancherClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
container, err := listRancherContainer(rancherClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rancherData = parseAPISourcedRancherData(stacks, services, container)
|
||||
|
||||
@@ -94,20 +103,29 @@ func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool
|
||||
|
||||
if errAPI != nil {
|
||||
log.Errorf("Cannot establish connection: %+v, Rancher API return: %+v; Skipping refresh Data from Rancher API.", errAPI, checkAPI)
|
||||
} else {
|
||||
log.Debugf("Refreshing new Data from Rancher API")
|
||||
stacks := listRancherStacks(rancherClient)
|
||||
services := listRancherServices(rancherClient)
|
||||
container := listRancherContainer(rancherClient)
|
||||
continue
|
||||
}
|
||||
log.Debugf("Refreshing new Data from Rancher API")
|
||||
stacks, err = listRancherStacks(rancherClient)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
services, err = listRancherServices(rancherClient)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
container, err = listRancherContainer(rancherClient)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
rancherData := parseAPISourcedRancherData(stacks, services, container)
|
||||
rancherData := parseAPISourcedRancherData(stacks, services, container)
|
||||
|
||||
configuration := p.buildConfiguration(rancherData)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
configuration := p.buildConfiguration(rancherData)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
case <-stop:
|
||||
@@ -133,7 +151,7 @@ func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool
|
||||
return nil
|
||||
}
|
||||
|
||||
func listRancherStacks(client *rancher.RancherClient) []*rancher.Stack {
|
||||
func listRancherStacks(client *rancher.RancherClient) ([]*rancher.Stack, error) {
|
||||
|
||||
var stackList []*rancher.Stack
|
||||
|
||||
@@ -147,10 +165,10 @@ func listRancherStacks(client *rancher.RancherClient) []*rancher.Stack {
|
||||
stackList = append(stackList, &stacks.Data[k])
|
||||
}
|
||||
|
||||
return stackList
|
||||
return stackList, err
|
||||
}
|
||||
|
||||
func listRancherServices(client *rancher.RancherClient) []*rancher.Service {
|
||||
func listRancherServices(client *rancher.RancherClient) ([]*rancher.Service, error) {
|
||||
|
||||
var servicesList []*rancher.Service
|
||||
|
||||
@@ -164,10 +182,10 @@ func listRancherServices(client *rancher.RancherClient) []*rancher.Service {
|
||||
servicesList = append(servicesList, &services.Data[k])
|
||||
}
|
||||
|
||||
return servicesList
|
||||
return servicesList, err
|
||||
}
|
||||
|
||||
func listRancherContainer(client *rancher.RancherClient) []*rancher.Container {
|
||||
func listRancherContainer(client *rancher.RancherClient) ([]*rancher.Container, error) {
|
||||
|
||||
var containerList []*rancher.Container
|
||||
|
||||
@@ -175,6 +193,7 @@ func listRancherContainer(client *rancher.RancherClient) []*rancher.Container {
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Cannot get Provider Services %+v", err)
|
||||
return containerList, err
|
||||
}
|
||||
|
||||
valid := true
|
||||
@@ -195,7 +214,7 @@ func listRancherContainer(client *rancher.RancherClient) []*rancher.Container {
|
||||
}
|
||||
}
|
||||
|
||||
return containerList
|
||||
return containerList, err
|
||||
}
|
||||
|
||||
func parseAPISourcedRancherData(stacks []*rancher.Stack, services []*rancher.Service, containers []*rancher.Container) []rancherData {
|
||||
|
||||
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -e

if [ -n "$TRAVIS_COMMIT" ]; then
  echo "Deploying PR..."
else
  echo "Skipping deploy PR"
  exit 0
fi

# create docker image containous/traefik
echo "Updating docker containous/traefik image..."
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker tag containous/traefik containous/traefik:${TRAVIS_COMMIT}
docker push containous/traefik:${TRAVIS_COMMIT}
docker tag containous/traefik containous/traefik:experimental
docker push containous/traefik:experimental

echo "Deployed"

@@ -22,7 +22,7 @@ ssh-add ~/.ssh/traefiker_rsa

echo "Updating traefik-library-imag repo..."
git clone git@github.com:containous/traefik-library-image.git
cd traefik-library-image
./update.sh $VERSION
./updatev1.sh $VERSION
git add -A
echo $VERSION | git commit --file -
echo $VERSION | git tag -a $VERSION --file -
@@ -33,9 +33,9 @@ import (
|
||||
traefiktls "github.com/containous/traefik/tls"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/containous/traefik/whitelist"
|
||||
"github.com/go-acme/lego/challenge/tlsalpn01"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/negroni"
|
||||
"github.com/xenolf/lego/challenge/tlsalpn01"
|
||||
)
|
||||
|
||||
var httpServerLogger = stdlog.New(log.WriterLevel(logrus.DebugLevel), "", 0)
|
||||
|
||||
@@ -211,6 +211,7 @@ func (s *Server) getRoundTripper(entryPointName string, passTLSCert bool, tls *t
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create TLSClientConfig: %v", err)
|
||||
}
|
||||
tlsConfig.InsecureSkipVerify = s.globalConfiguration.InsecureSkipVerify
|
||||
|
||||
transport, err := createHTTPTransport(s.globalConfiguration)
|
||||
if err != nil {
|
||||
|
||||
@@ -112,6 +112,15 @@ func (s *Server) buildMiddlewares(frontendName string, frontend *types.Frontend,
|
||||
middle = append(middle, handler)
|
||||
}
|
||||
|
||||
// TLSClientHeaders
|
||||
tlsClientHeadersMiddleware := middlewares.NewTLSClientHeaders(frontend)
|
||||
if tlsClientHeadersMiddleware != nil {
|
||||
log.Debugf("Adding TLSClientHeaders middleware for frontend %s", frontendName)
|
||||
|
||||
handler := s.tracingMiddleware.NewNegroniHandlerWrapper("TLSClientHeaders", tlsClientHeadersMiddleware, false)
|
||||
middle = append(middle, handler)
|
||||
}
|
||||
|
||||
// Authentication
|
||||
if frontend.Auth != nil {
|
||||
authMiddleware, err := mauth.NewAuthenticator(frontend.Auth, s.tracingMiddleware)
|
||||
@@ -123,15 +132,6 @@ func (s *Server) buildMiddlewares(frontendName string, frontend *types.Frontend,
|
||||
middle = append(middle, handler)
|
||||
}
|
||||
|
||||
// TLSClientHeaders
|
||||
tlsClientHeadersMiddleware := middlewares.NewTLSClientHeaders(frontend)
|
||||
if tlsClientHeadersMiddleware != nil {
|
||||
log.Debugf("Adding TLSClientHeaders middleware for frontend %s", frontendName)
|
||||
|
||||
handler := s.tracingMiddleware.NewNegroniHandlerWrapper("TLSClientHeaders", tlsClientHeadersMiddleware, false)
|
||||
middle = append(middle, handler)
|
||||
}
|
||||
|
||||
return middle, buildModifyResponse(secureMiddleware, headerMiddleware), postConfig, nil
|
||||
}
|
||||
|
||||
|
||||
53  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1/client.go  generated  vendored
@@ -6,8 +6,11 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
@@ -16,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
libraryVersion = "0.6.0"
|
||||
libraryVersion = "0.6.2"
|
||||
// UserAgent is the User-Agent value sent for all requests
|
||||
UserAgent = "Akamai-Open-Edgegrid-golang/" + libraryVersion + " golang/" + strings.TrimPrefix(runtime.Version(), "go")
|
||||
// Client is the *http.Client to use
|
||||
@@ -61,13 +64,21 @@ func NewRequest(config edgegrid.Config, method, path string, body io.Reader) (*h
|
||||
// NewJSONRequest creates an HTTP request that can be sent to the Akamai APIs with a JSON body
|
||||
// The JSON body is encoded and the Content-Type/Accept headers are set automatically.
|
||||
func NewJSONRequest(config edgegrid.Config, method, path string, body interface{}) (*http.Request, error) {
|
||||
jsonBody, err := jsonhooks.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var req *http.Request
|
||||
var err error
|
||||
|
||||
if body != nil {
|
||||
jsonBody, err := jsonhooks.Marshal(body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := bytes.NewReader(jsonBody)
|
||||
req, err = NewRequest(config, method, path, buf)
|
||||
} else {
|
||||
req, err = NewRequest(config, method, path, nil)
|
||||
}
|
||||
|
||||
buf := bytes.NewReader(jsonBody)
|
||||
req, err := NewRequest(config, method, path, buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -78,6 +89,36 @@ func NewJSONRequest(config edgegrid.Config, method, path string, body interface{
|
||||
return req, nil
|
||||
}
|
||||
|
||||
// NewMultiPartFormDataRequest creates an HTTP request that uploads a file to the Akamai API
|
||||
func NewMultiPartFormDataRequest(config edgegrid.Config, uriPath, filePath string, otherFormParams map[string]string) (*http.Request, error) {
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
body := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(body)
|
||||
// TODO: make this field name configurable
|
||||
part, err := writer.CreateFormFile("importFile", filepath.Base(filePath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(part, file)
|
||||
|
||||
for key, val := range otherFormParams {
|
||||
_ = writer.WriteField(key, val)
|
||||
}
|
||||
err = writer.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := NewRequest(config, "POST", uriPath, body)
|
||||
req.Header.Set("Content-Type", writer.FormDataContentType())
|
||||
return req, err
|
||||
}
|
||||
|
||||
// Do performs a given HTTP Request, signed with the Akamai OPEN Edgegrid
|
||||
// Authorization header. An edgegrid.Response or an error is returned.
|
||||
func Do(config edgegrid.Config, req *http.Request) (*http.Response, error) {
|
||||
|
||||
47  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/client-v1/errors.go  generated  vendored
@@ -12,22 +12,42 @@ import (
|
||||
// APIError exposes an Akamai OPEN Edgegrid Error
|
||||
type APIError struct {
|
||||
error
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Status int `json:"status"`
|
||||
Detail string `json:"detail"`
|
||||
Instance string `json:"instance"`
|
||||
Method string `json:"method"`
|
||||
ServerIP string `json:"serverIp"`
|
||||
ClientIP string `json:"clientIp"`
|
||||
RequestID string `json:"requestId"`
|
||||
RequestTime string `json:"requestTime"`
|
||||
Response *http.Response `json:"-"`
|
||||
RawBody string `json:"-"`
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Status int `json:"status"`
|
||||
Detail string `json:"detail"`
|
||||
Errors []APIErrorDetail `json:"errors"`
|
||||
Problems []APIErrorDetail `json:"problems"`
|
||||
Instance string `json:"instance"`
|
||||
Method string `json:"method"`
|
||||
ServerIP string `json:"serverIp"`
|
||||
ClientIP string `json:"clientIp"`
|
||||
RequestID string `json:"requestId"`
|
||||
RequestTime string `json:"requestTime"`
|
||||
Response *http.Response `json:"-"`
|
||||
RawBody string `json:"-"`
|
||||
}
|
||||
|
||||
type APIErrorDetail struct {
|
||||
Type string `json:"type"`
|
||||
Title string `json:"title"`
|
||||
Detail string `json:"detail"`
|
||||
RejectedValue string `json:"rejectedValue"`
|
||||
}
|
||||
|
||||
func (error APIError) Error() string {
|
||||
return strings.TrimSpace(fmt.Sprintf("API Error: %d %s %s More Info %s", error.Status, error.Title, error.Detail, error.Type))
|
||||
var errorDetails string
|
||||
if len(error.Errors) > 0 {
|
||||
for _, e := range error.Errors {
|
||||
errorDetails = fmt.Sprintf("%s \n %s", errorDetails, e)
|
||||
}
|
||||
}
|
||||
if len(error.Problems) > 0 {
|
||||
for _, e := range error.Problems {
|
||||
errorDetails = fmt.Sprintf("%s \n %s", errorDetails, e)
|
||||
}
|
||||
}
|
||||
return strings.TrimSpace(fmt.Sprintf("API Error: %d %s %s More Info %s\n %s", error.Status, error.Title, error.Detail, error.Type, errorDetails))
|
||||
}
|
||||
|
||||
// NewAPIError creates a new API error based on a Response,
|
||||
@@ -45,7 +65,6 @@ func NewAPIError(response *http.Response) APIError {
|
||||
// other purposes.
|
||||
func NewAPIErrorFromBody(response *http.Response, body []byte) APIError {
|
||||
error := APIError{}
|
||||
|
||||
if err := jsonhooks.Unmarshal(body, &error); err == nil {
|
||||
error.Status = response.StatusCode
|
||||
error.Title = response.Status
|
||||
|
||||
19  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v1/record.go  generated  vendored
@@ -1323,15 +1323,16 @@ func (record *RrsigRecord) ToMap() map[string]interface{} {
|
||||
}
|
||||
|
||||
type SoaRecord struct {
|
||||
fieldMap []string `json:"-"`
|
||||
TTL int `json:"ttl,omitempty"`
|
||||
Originserver string `json:"originserver,omitempty"`
|
||||
Contact string `json:"contact,omitempty"`
|
||||
Serial uint `json:"serial,omitempty"`
|
||||
Refresh int `json:"refresh,omitempty"`
|
||||
Retry int `json:"retry,omitempty"`
|
||||
Expire int `json:"expire,omitempty"`
|
||||
Minimum uint `json:"minimum,omitempty"`
|
||||
fieldMap []string `json:"-"`
|
||||
originalSerial uint `json:"-"`
|
||||
TTL int `json:"ttl,omitempty"`
|
||||
Originserver string `json:"originserver,omitempty"`
|
||||
Contact string `json:"contact,omitempty"`
|
||||
Serial uint `json:"serial,omitempty"`
|
||||
Refresh int `json:"refresh,omitempty"`
|
||||
Retry int `json:"retry,omitempty"`
|
||||
Expire int `json:"expire,omitempty"`
|
||||
Minimum uint `json:"minimum,omitempty"`
|
||||
}
|
||||
|
||||
func NewSoaRecord() *SoaRecord {
|
||||
|
||||
34  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/configdns-v1/zone.go  generated  vendored
@@ -82,7 +82,7 @@ func GetZone(hostname string) (*Zone, error) {
|
||||
} else if res.StatusCode == 404 {
|
||||
return nil, &ZoneError{zoneName: hostname}
|
||||
} else {
|
||||
err = client.BodyJSON(res, &zone)
|
||||
err = client.BodyJSON(res, zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -762,11 +762,18 @@ func (zone *Zone) removeTxtRecord(record *TxtRecord) error {
|
||||
return errors.New("Txt Record not found")
|
||||
}
|
||||
|
||||
func (zone *Zone) PreMarshalJSON() error {
|
||||
func (zone *Zone) PostUnmarshalJSON() error {
|
||||
if zone.Zone.Soa.Serial > 0 {
|
||||
zone.Zone.Soa.Serial = zone.Zone.Soa.Serial + 1
|
||||
} else {
|
||||
zone.Zone.Soa.originalSerial = zone.Zone.Soa.Serial
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (zone *Zone) PreMarshalJSON() error {
|
||||
if zone.Zone.Soa.Serial == 0 {
|
||||
zone.Zone.Soa.Serial = uint(time.Now().Unix())
|
||||
} else if zone.Zone.Soa.Serial == zone.Zone.Soa.originalSerial {
|
||||
zone.Zone.Soa.Serial = zone.Zone.Soa.Serial + 1
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -786,21 +793,24 @@ func (zone *Zone) validateCnames() (bool, []name) {
|
||||
}
|
||||
|
||||
func (zone *Zone) removeCnameName(host string) {
|
||||
for i, v := range cnameNames {
|
||||
if v.name == host {
|
||||
r := cnameNames[:i]
|
||||
cnameNames = append(r, cnameNames[i+1:]...)
|
||||
var ncn []name
|
||||
for _, v := range cnameNames {
|
||||
if v.name != host {
|
||||
ncn =append(ncn, v)
|
||||
}
|
||||
}
|
||||
cnameNames = ncn
|
||||
}
|
||||
|
||||
|
||||
func (zone *Zone) removeNonCnameName(host string) {
|
||||
for i, v := range nonCnameNames {
|
||||
if v.name == host {
|
||||
r := nonCnameNames[:i]
|
||||
nonCnameNames = append(r, nonCnameNames[i+1:]...)
|
||||
var ncn []name
|
||||
for _, v := range nonCnameNames {
|
||||
if v.name != host {
|
||||
ncn =append(ncn, v)
|
||||
}
|
||||
}
|
||||
nonCnameNames = ncn
|
||||
}
|
||||
|
||||
func (zone *Zone) FindRecords(recordType string, options map[string]interface{}) []DNSRecord {
|
||||
|
||||
4  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid/config.go  generated  vendored
@@ -7,7 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/go-ini/ini"
|
||||
"gopkg.in/mattes/go-expand-tilde.v1"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
// Config struct provides all the necessary fields to
|
||||
@@ -86,7 +86,7 @@ func InitEdgeRc(filepath string, section string) (Config, error) {
|
||||
// Tilde seems to be not working when passing ~/.edgerc as file
|
||||
// Takes current user and use home dir instead
|
||||
|
||||
path, err := tilde.Expand(filepath)
|
||||
path, err := homedir.Expand(filepath)
|
||||
|
||||
if err != nil {
|
||||
return c, fmt.Errorf(errorMap[ErrHomeDirNotFound], err)
|
||||
|
||||
6  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/edgegrid/signer.go  generated  vendored
@@ -14,8 +14,8 @@ import (
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"github.com/google/uuid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/tuvistavie/securerandom"
|
||||
)
|
||||
|
||||
const defaultSection = "DEFAULT"
|
||||
@@ -49,12 +49,12 @@ func makeEdgeTimeStamp() string {
|
||||
// It is a random string used to detect replayed request messages.
|
||||
// A GUID is recommended.
|
||||
func createNonce() string {
|
||||
uuid, err := securerandom.Uuid()
|
||||
uuid, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
log.Errorf(errorMap[ErrUUIDGenerateFailed], err)
|
||||
return ""
|
||||
}
|
||||
return uuid
|
||||
return uuid.String()
|
||||
}
|
||||
|
||||
func stringMinifier(in string) (out string) {
|
||||
|
||||
2  vendor/github.com/akamai/AkamaiOPEN-edgegrid-golang/jsonhooks-v1/jsonhooks.go  generated  vendored
@@ -44,7 +44,7 @@ type PreJSONMarshaler interface {
|
||||
// ImplementsPreJSONMarshaler checks for support for the PreMarshalJSON pre-hook
|
||||
func ImplementsPreJSONMarshaler(v interface{}) bool {
|
||||
value := reflect.ValueOf(v)
|
||||
if value.Kind() == reflect.Ptr && value.IsNil() {
|
||||
if !value.IsValid() {
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
110  vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/push/push_notice_to_android.go  generated  vendored
@@ -1,110 +0,0 @@
|
||||
package push
|
||||
|
||||
//Licensed under the Apache License, Version 2.0 (the "License");
|
||||
//you may not use this file except in compliance with the License.
|
||||
//You may obtain a copy of the License at
|
||||
//
|
||||
//http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
//Unless required by applicable law or agreed to in writing, software
|
||||
//distributed under the License is distributed on an "AS IS" BASIS,
|
||||
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//See the License for the specific language governing permissions and
|
||||
//limitations under the License.
|
||||
//
|
||||
// Code generated by Alibaba Cloud SDK Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
|
||||
)
|
||||
|
||||
// PushNoticeToAndroid invokes the push.PushNoticeToAndroid API synchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoandroid.html
|
||||
func (client *Client) PushNoticeToAndroid(request *PushNoticeToAndroidRequest) (response *PushNoticeToAndroidResponse, err error) {
|
||||
response = CreatePushNoticeToAndroidResponse()
|
||||
err = client.DoAction(request, response)
|
||||
return
|
||||
}
|
||||
|
||||
// PushNoticeToAndroidWithChan invokes the push.PushNoticeToAndroid API asynchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoandroid.html
|
||||
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
|
||||
func (client *Client) PushNoticeToAndroidWithChan(request *PushNoticeToAndroidRequest) (<-chan *PushNoticeToAndroidResponse, <-chan error) {
|
||||
responseChan := make(chan *PushNoticeToAndroidResponse, 1)
|
||||
errChan := make(chan error, 1)
|
||||
err := client.AddAsyncTask(func() {
|
||||
defer close(responseChan)
|
||||
defer close(errChan)
|
||||
response, err := client.PushNoticeToAndroid(request)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
responseChan <- response
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
close(responseChan)
|
||||
close(errChan)
|
||||
}
|
||||
return responseChan, errChan
|
||||
}
|
||||
|
||||
// PushNoticeToAndroidWithCallback invokes the push.PushNoticeToAndroid API asynchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoandroid.html
|
||||
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
|
||||
func (client *Client) PushNoticeToAndroidWithCallback(request *PushNoticeToAndroidRequest, callback func(response *PushNoticeToAndroidResponse, err error)) <-chan int {
|
||||
result := make(chan int, 1)
|
||||
err := client.AddAsyncTask(func() {
|
||||
var response *PushNoticeToAndroidResponse
|
||||
var err error
|
||||
defer close(result)
|
||||
response, err = client.PushNoticeToAndroid(request)
|
||||
callback(response, err)
|
||||
result <- 1
|
||||
})
|
||||
if err != nil {
|
||||
defer close(result)
|
||||
callback(nil, err)
|
||||
result <- 0
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// PushNoticeToAndroidRequest is the request struct for api PushNoticeToAndroid
|
||||
type PushNoticeToAndroidRequest struct {
|
||||
*requests.RpcRequest
|
||||
AppKey requests.Integer `position:"Query" name:"AppKey"`
|
||||
Target string `position:"Query" name:"Target"`
|
||||
TargetValue string `position:"Query" name:"TargetValue"`
|
||||
Title string `position:"Query" name:"Title"`
|
||||
Body string `position:"Query" name:"Body"`
|
||||
JobKey string `position:"Query" name:"JobKey"`
|
||||
ExtParameters string `position:"Query" name:"ExtParameters"`
|
||||
}
|
||||
|
||||
// PushNoticeToAndroidResponse is the response struct for api PushNoticeToAndroid
|
||||
type PushNoticeToAndroidResponse struct {
|
||||
*responses.BaseResponse
|
||||
RequestId string `json:"RequestId" xml:"RequestId"`
|
||||
MessageId string `json:"MessageId" xml:"MessageId"`
|
||||
}
|
||||
|
||||
// CreatePushNoticeToAndroidRequest creates a request to invoke PushNoticeToAndroid API
|
||||
func CreatePushNoticeToAndroidRequest() (request *PushNoticeToAndroidRequest) {
|
||||
request = &PushNoticeToAndroidRequest{
|
||||
RpcRequest: &requests.RpcRequest{},
|
||||
}
|
||||
request.InitWithApiInfo("Push", "2016-08-01", "PushNoticeToAndroid", "", "")
|
||||
return
|
||||
}
|
||||
|
||||
// CreatePushNoticeToAndroidResponse creates a response to parse from PushNoticeToAndroid response
|
||||
func CreatePushNoticeToAndroidResponse() (response *PushNoticeToAndroidResponse) {
|
||||
response = &PushNoticeToAndroidResponse{
|
||||
BaseResponse: &responses.BaseResponse{},
|
||||
}
|
||||
return
|
||||
}
|
||||
111  vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/push/push_notice_toi_os.go  generated  vendored
@@ -1,111 +0,0 @@
|
||||
package push
|
||||
|
||||
//Licensed under the Apache License, Version 2.0 (the "License");
|
||||
//you may not use this file except in compliance with the License.
|
||||
//You may obtain a copy of the License at
|
||||
//
|
||||
//http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
//Unless required by applicable law or agreed to in writing, software
|
||||
//distributed under the License is distributed on an "AS IS" BASIS,
|
||||
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//See the License for the specific language governing permissions and
|
||||
//limitations under the License.
|
||||
//
|
||||
// Code generated by Alibaba Cloud SDK Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
|
||||
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
|
||||
)
|
||||
|
||||
// PushNoticeToiOS invokes the push.PushNoticeToiOS API synchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoios.html
|
||||
func (client *Client) PushNoticeToiOS(request *PushNoticeToiOSRequest) (response *PushNoticeToiOSResponse, err error) {
|
||||
response = CreatePushNoticeToiOSResponse()
|
||||
err = client.DoAction(request, response)
|
||||
return
|
||||
}
|
||||
|
||||
// PushNoticeToiOSWithChan invokes the push.PushNoticeToiOS API asynchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoios.html
|
||||
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
|
||||
func (client *Client) PushNoticeToiOSWithChan(request *PushNoticeToiOSRequest) (<-chan *PushNoticeToiOSResponse, <-chan error) {
|
||||
responseChan := make(chan *PushNoticeToiOSResponse, 1)
|
||||
errChan := make(chan error, 1)
|
||||
err := client.AddAsyncTask(func() {
|
||||
defer close(responseChan)
|
||||
defer close(errChan)
|
||||
response, err := client.PushNoticeToiOS(request)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
responseChan <- response
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
close(responseChan)
|
||||
close(errChan)
|
||||
}
|
||||
return responseChan, errChan
|
||||
}
|
||||
|
||||
// PushNoticeToiOSWithCallback invokes the push.PushNoticeToiOS API asynchronously
|
||||
// api document: https://help.aliyun.com/api/push/pushnoticetoios.html
|
||||
// asynchronous document: https://help.aliyun.com/document_detail/66220.html
|
||||
func (client *Client) PushNoticeToiOSWithCallback(request *PushNoticeToiOSRequest, callback func(response *PushNoticeToiOSResponse, err error)) <-chan int {
|
||||
result := make(chan int, 1)
|
||||
err := client.AddAsyncTask(func() {
|
||||
var response *PushNoticeToiOSResponse
|
||||
var err error
|
||||
defer close(result)
|
||||
response, err = client.PushNoticeToiOS(request)
|
||||
callback(response, err)
|
||||
result <- 1
|
||||
})
|
||||
if err != nil {
|
||||
defer close(result)
|
||||
callback(nil, err)
|
||||
result <- 0
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// PushNoticeToiOSRequest is the request struct for api PushNoticeToiOS
|
||||
type PushNoticeToiOSRequest struct {
|
||||
*requests.RpcRequest
|
||||
AppKey requests.Integer `position:"Query" name:"AppKey"`
|
||||
Target string `position:"Query" name:"Target"`
|
||||
TargetValue string `position:"Query" name:"TargetValue"`
|
||||
ApnsEnv string `position:"Query" name:"ApnsEnv"`
|
||||
Title string `position:"Query" name:"Title"`
|
||||
Body string `position:"Query" name:"Body"`
|
||||
JobKey string `position:"Query" name:"JobKey"`
|
||||
ExtParameters string `position:"Query" name:"ExtParameters"`
|
||||
}
|
||||
|
||||
// PushNoticeToiOSResponse is the response struct for api PushNoticeToiOS
|
||||
type PushNoticeToiOSResponse struct {
|
||||
*responses.BaseResponse
|
||||
RequestId string `json:"RequestId" xml:"RequestId"`
|
||||
MessageId string `json:"MessageId" xml:"MessageId"`
|
||||
}
|
||||
|
||||
// CreatePushNoticeToiOSRequest creates a request to invoke PushNoticeToiOS API
|
||||
func CreatePushNoticeToiOSRequest() (request *PushNoticeToiOSRequest) {
|
||||
request = &PushNoticeToiOSRequest{
|
||||
RpcRequest: &requests.RpcRequest{},
|
||||
}
|
||||
request.InitWithApiInfo("Push", "2016-08-01", "PushNoticeToiOS", "", "")
|
||||
return
|
||||
}
|
||||
|
||||
// CreatePushNoticeToiOSResponse creates a response to parse from PushNoticeToiOS response
|
||||
func CreatePushNoticeToiOSResponse() (response *PushNoticeToiOSResponse) {
|
||||
response = &PushNoticeToiOSResponse{
|
||||
BaseResponse: &responses.BaseResponse{},
|
||||
}
|
||||
return
|
||||
}
|
||||
11  vendor/github.com/cenk/backoff/context.go  generated  vendored
@@ -1,9 +1,8 @@
package backoff

import (
	"context"
	"time"

	"golang.org/x/net/context"
)

// BackOffContext is a backoff policy that stops retrying after the context
@@ -52,9 +51,13 @@ func (b *backOffContext) Context() context.Context {

func (b *backOffContext) NextBackOff() time.Duration {
	select {
	case <-b.Context().Done():
	case <-b.ctx.Done():
		return Stop
	default:
		return b.BackOff.NextBackOff()
	}
	next := b.BackOff.NextBackOff()
	if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
		return Stop
	}
	return next
}
7  vendor/github.com/cenk/backoff/exponential.go  generated  vendored
@@ -63,7 +63,6 @@ type ExponentialBackOff struct {

	currentInterval time.Duration
	startTime time.Time
	random *rand.Rand
}

// Clock is an interface that returns current time for BackOff.
@@ -89,7 +88,6 @@ func NewExponentialBackOff() *ExponentialBackOff {
		MaxInterval: DefaultMaxInterval,
		MaxElapsedTime: DefaultMaxElapsedTime,
		Clock: SystemClock,
		random: rand.New(rand.NewSource(time.Now().UnixNano())),
	}
	b.Reset()
	return b
@@ -118,10 +116,7 @@ func (b *ExponentialBackOff) NextBackOff() time.Duration {
		return Stop
	}
	defer b.incrementCurrentInterval()
	if b.random == nil {
		b.random = rand.New(rand.NewSource(time.Now().UnixNano()))
	}
	return getRandomValueFromInterval(b.RandomizationFactor, b.random.Float64(), b.currentInterval)
	return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
}

// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
12  vendor/github.com/cenk/backoff/retry.go  generated  vendored
@@ -15,7 +15,6 @@ type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
@@ -29,6 +28,7 @@ func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
	var err error
	var next time.Duration
	var t *time.Timer

	cb := ensureContext(b)

@@ -42,7 +42,7 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
		return permanent.Err
	}

	if next = b.NextBackOff(); next == Stop {
	if next = cb.NextBackOff(); next == Stop {
		return err
	}

@@ -50,11 +50,15 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
		notify(err, next)
	}

	t := time.NewTimer(next)
	if t == nil {
		t = time.NewTimer(next)
		defer t.Stop()
	} else {
		t.Reset(next)
	}

	select {
	case <-cb.Context().Done():
		t.Stop()
		return err
	case <-t.C:
	}
2  vendor/github.com/cenk/backoff/ticker.go  generated  vendored
@@ -1,7 +1,6 @@
package backoff

import (
	"runtime"
	"sync"
	"time"
)
@@ -34,7 +33,6 @@ func NewTicker(b BackOff) *Ticker {
	}
	t.b.Reset()
	go t.run()
	runtime.SetFinalizer(t, (*Ticker).Stop)
	return t
}
20  vendor/github.com/cenkalti/backoff/LICENSE  generated  vendored  Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Cenk Altı
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
66  vendor/github.com/cenkalti/backoff/backoff.go  generated  vendored  Normal file
@@ -0,0 +1,66 @@
|
||||
// Package backoff implements backoff algorithms for retrying operations.
|
||||
//
|
||||
// Use Retry function for retrying operations that may fail.
|
||||
// If Retry does not meet your needs,
|
||||
// copy/paste the function into your project and modify as you wish.
|
||||
//
|
||||
// There is also Ticker type similar to time.Ticker.
|
||||
// You can use it if you need to work with channels.
|
||||
//
|
||||
// See Examples section below for usage examples.
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// BackOff is a backoff policy for retrying an operation.
|
||||
type BackOff interface {
|
||||
// NextBackOff returns the duration to wait before retrying the operation,
|
||||
// or backoff. Stop to indicate that no more retries should be made.
|
||||
//
|
||||
// Example usage:
|
||||
//
|
||||
// duration := backoff.NextBackOff();
|
||||
// if (duration == backoff.Stop) {
|
||||
// // Do not retry operation.
|
||||
// } else {
|
||||
// // Sleep for duration and retry operation.
|
||||
// }
|
||||
//
|
||||
NextBackOff() time.Duration
|
||||
|
||||
// Reset to initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// Stop indicates that no more retries should be made for use in NextBackOff().
|
||||
const Stop time.Duration = -1
|
||||
|
||||
// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
|
||||
// meaning that the operation is retried immediately without waiting, indefinitely.
|
||||
type ZeroBackOff struct{}
|
||||
|
||||
func (b *ZeroBackOff) Reset() {}
|
||||
|
||||
func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
|
||||
|
||||
// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
|
||||
// NextBackOff(), meaning that the operation should never be retried.
|
||||
type StopBackOff struct{}
|
||||
|
||||
func (b *StopBackOff) Reset() {}
|
||||
|
||||
func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
|
||||
|
||||
// ConstantBackOff is a backoff policy that always returns the same backoff delay.
|
||||
// This is in contrast to an exponential backoff policy,
|
||||
// which returns a delay that grows longer as you call NextBackOff() over and over again.
|
||||
type ConstantBackOff struct {
|
||||
Interval time.Duration
|
||||
}
|
||||
|
||||
func (b *ConstantBackOff) Reset() {}
|
||||
func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
|
||||
|
||||
func NewConstantBackOff(d time.Duration) *ConstantBackOff {
|
||||
return &ConstantBackOff{Interval: d}
|
||||
}
|
||||
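The BackOff interface added above is what the package's Retry helper (see vendor/github.com/cenkalti/backoff/retry.go further down) consumes. A minimal usage sketch; the HTTP endpoint is hypothetical:

package main

import (
	"fmt"
	"net/http"

	"github.com/cenkalti/backoff"
)

func main() {
	// An Operation is just func() error; returning an error triggers another try.
	fetch := func() error {
		resp, err := http.Get("https://example.invalid/health") // hypothetical endpoint
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode >= 500 {
			return fmt.Errorf("server not ready: %s", resp.Status)
		}
		return nil
	}

	// NextBackOff keeps returning growing, randomized delays until MaxElapsedTime
	// is exceeded, at which point it returns backoff.Stop and Retry gives up
	// with the last error.
	if err := backoff.Retry(fetch, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("giving up:", err)
	}
}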
63  vendor/github.com/cenkalti/backoff/context.go  generated  vendored  Normal file
@@ -0,0 +1,63 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface {
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
||||
type backOffContext struct {
|
||||
BackOff
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext {
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
||||
if b, ok := b.(*backOffContext); ok {
|
||||
return &backOffContext{
|
||||
BackOff: b.BackOff,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
return &backOffContext{
|
||||
BackOff: b,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
func ensureContext(b BackOff) BackOffContext {
|
||||
if cb, ok := b.(BackOffContext); ok {
|
||||
return cb
|
||||
}
|
||||
return WithContext(b, context.Background())
|
||||
}
|
||||
|
||||
func (b *backOffContext) Context() context.Context {
|
||||
return b.ctx
|
||||
}
|
||||
|
||||
func (b *backOffContext) NextBackOff() time.Duration {
|
||||
select {
|
||||
case <-b.ctx.Done():
|
||||
return Stop
|
||||
default:
|
||||
}
|
||||
next := b.BackOff.NextBackOff()
|
||||
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
|
||||
return Stop
|
||||
}
|
||||
return next
|
||||
}
|
||||
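WithContext, added above, wires a context into any BackOff so that cancellation (or an approaching deadline) makes NextBackOff return Stop. A brief sketch of how it combines with RetryNotify; the operation here is a placeholder:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Once ctx is done, NextBackOff returns backoff.Stop and retrying ends.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	op := func() error { return errors.New("still failing") } // placeholder operation

	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s after error: %v\n", wait, err)
	}

	if err := backoff.RetryNotify(op, b, notify); err != nil {
		fmt.Println("stopped:", err)
	}
}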
153  vendor/github.com/cenkalti/backoff/exponential.go  generated  vendored  Normal file
@@ -0,0 +1,153 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
ExponentialBackOff is a backoff implementation that increases the backoff
|
||||
period for each retry attempt using a randomization function that grows exponentially.
|
||||
|
||||
NextBackOff() is calculated using the following formula:
|
||||
|
||||
randomized interval =
|
||||
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
|
||||
|
||||
In other words NextBackOff() will range between the randomization factor
|
||||
percentage below and above the retry interval.
|
||||
|
||||
For example, given the following parameters:
|
||||
|
||||
RetryInterval = 2
|
||||
RandomizationFactor = 0.5
|
||||
Multiplier = 2
|
||||
|
||||
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
|
||||
multiplied by the exponential, that is, between 2 and 6 seconds.
|
||||
|
||||
Note: MaxInterval caps the RetryInterval and not the randomized interval.
|
||||
|
||||
If the time elapsed since an ExponentialBackOff instance is created goes past the
|
||||
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
|
||||
|
||||
The elapsed time can be reset by calling Reset().
|
||||
|
||||
Example: Given the following default arguments, for 10 tries the sequence will be,
|
||||
and assuming we go over the MaxElapsedTime on the 10th try:
|
||||
|
||||
Request # RetryInterval (seconds) Randomized Interval (seconds)
|
||||
|
||||
1 0.5 [0.25, 0.75]
|
||||
2 0.75 [0.375, 1.125]
|
||||
3 1.125 [0.562, 1.687]
|
||||
4 1.687 [0.8435, 2.53]
|
||||
5 2.53 [1.265, 3.795]
|
||||
6 3.795 [1.897, 5.692]
|
||||
7 5.692 [2.846, 8.538]
|
||||
8 8.538 [4.269, 12.807]
|
||||
9 12.807 [6.403, 19.210]
|
||||
10 19.210 backoff.Stop
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
type ExponentialBackOff struct {
|
||||
InitialInterval time.Duration
|
||||
RandomizationFactor float64
|
||||
Multiplier float64
|
||||
MaxInterval time.Duration
|
||||
// After MaxElapsedTime the ExponentialBackOff stops.
|
||||
// It never stops if MaxElapsedTime == 0.
|
||||
MaxElapsedTime time.Duration
|
||||
Clock Clock
|
||||
|
||||
currentInterval time.Duration
|
||||
startTime time.Time
|
||||
}
|
||||
|
||||
// Clock is an interface that returns current time for BackOff.
|
||||
type Clock interface {
|
||||
Now() time.Time
|
||||
}
|
||||
|
||||
// Default values for ExponentialBackOff.
|
||||
const (
|
||||
DefaultInitialInterval = 500 * time.Millisecond
|
||||
DefaultRandomizationFactor = 0.5
|
||||
DefaultMultiplier = 1.5
|
||||
DefaultMaxInterval = 60 * time.Second
|
||||
DefaultMaxElapsedTime = 15 * time.Minute
|
||||
)
|
||||
|
||||
// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
|
||||
func NewExponentialBackOff() *ExponentialBackOff {
|
||||
b := &ExponentialBackOff{
|
||||
InitialInterval: DefaultInitialInterval,
|
||||
RandomizationFactor: DefaultRandomizationFactor,
|
||||
Multiplier: DefaultMultiplier,
|
||||
MaxInterval: DefaultMaxInterval,
|
||||
MaxElapsedTime: DefaultMaxElapsedTime,
|
||||
Clock: SystemClock,
|
||||
}
|
||||
b.Reset()
|
||||
return b
|
||||
}
|
||||
|
||||
type systemClock struct{}
|
||||
|
||||
func (t systemClock) Now() time.Time {
|
||||
return time.Now()
|
||||
}
|
||||
|
||||
// SystemClock implements Clock interface that uses time.Now().
|
||||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
|
||||
return Stop
|
||||
}
|
||||
defer b.incrementCurrentInterval()
|
||||
return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
|
||||
}
|
||||
|
||||
// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
|
||||
// is created and is reset when Reset() is called.
|
||||
//
|
||||
// The elapsed time is computed using time.Now().UnixNano(). It is
|
||||
// safe to call even while the backoff policy is used by a running
|
||||
// ticker.
|
||||
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
|
||||
return b.Clock.Now().Sub(b.startTime)
|
||||
}
|
||||
|
||||
// Increments the current interval by multiplying it with the multiplier.
|
||||
func (b *ExponentialBackOff) incrementCurrentInterval() {
|
||||
// Check for overflow, if overflow is detected set the current interval to the max interval.
|
||||
if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
|
||||
b.currentInterval = b.MaxInterval
|
||||
} else {
|
||||
b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns a random value from the following interval:
|
||||
// [randomizationFactor * currentInterval, randomizationFactor * currentInterval].
|
||||
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
|
||||
var delta = randomizationFactor * float64(currentInterval)
|
||||
var minInterval = float64(currentInterval) - delta
|
||||
var maxInterval = float64(currentInterval) + delta
|
||||
|
||||
// Get a random value from the range [minInterval, maxInterval].
|
||||
// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
|
||||
// we want a 33% chance for selecting either 1, 2 or 3.
|
||||
return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
|
||||
}
|
||||
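To make the randomized-interval formula documented above concrete: with InitialInterval = 500ms and RandomizationFactor = 0.5, the first NextBackOff falls in [250ms, 750ms]; the retry interval is then multiplied by 1.5, so the second value falls in roughly [375ms, 1.125s], and so on. A small sketch that sets the documented fields and prints a few values (exact numbers are random):

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.InitialInterval = 500 * time.Millisecond
	b.RandomizationFactor = 0.5
	b.Multiplier = 1.5
	b.MaxInterval = 60 * time.Second
	b.MaxElapsedTime = 0 // 0 means the policy never stops on elapsed time
	b.Reset()

	for i := 1; i <= 5; i++ {
		// randomized interval = currentInterval * [1-0.5, 1+0.5],
		// then currentInterval is multiplied by 1.5 for the next call.
		fmt.Printf("try %d: wait %s\n", i, b.NextBackOff())
	}
}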
82  vendor/github.com/cenkalti/backoff/retry.go  generated  vendored  Normal file
@@ -0,0 +1,82 @@
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
// An Operation is executing by Retry() or RetryNotify().
|
||||
// The operation will be retried using a backoff policy if it returns an error.
|
||||
type Operation func() error
|
||||
|
||||
// Notify is a notify-on-error function. It receives an operation error and
|
||||
// backoff delay if the operation failed (with an error).
|
||||
//
|
||||
// NOTE that if the backoff policy stated to stop retrying,
|
||||
// the notify function isn't called.
|
||||
type Notify func(error, time.Duration)
|
||||
|
||||
// Retry the operation o until it does not return error or BackOff stops.
|
||||
// o is guaranteed to be run at least once.
|
||||
//
|
||||
// If o returns a *PermanentError, the operation is not retried, and the
|
||||
// wrapped error is returned.
|
||||
//
|
||||
// Retry sleeps the goroutine for the duration returned by BackOff after a
|
||||
// failed operation returns.
|
||||
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
|
||||
|
||||
// RetryNotify calls notify function with the error and wait duration
|
||||
// for each failed attempt before sleep.
|
||||
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
|
||||
var err error
|
||||
var next time.Duration
|
||||
var t *time.Timer
|
||||
|
||||
cb := ensureContext(b)
|
||||
|
||||
b.Reset()
|
||||
for {
|
||||
if err = operation(); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if permanent, ok := err.(*PermanentError); ok {
|
||||
return permanent.Err
|
||||
}
|
||||
|
||||
if next = cb.NextBackOff(); next == Stop {
|
||||
return err
|
||||
}
|
||||
|
||||
if notify != nil {
|
||||
notify(err, next)
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
t = time.NewTimer(next)
|
||||
defer t.Stop()
|
||||
} else {
|
||||
t.Reset(next)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-cb.Context().Done():
|
||||
return err
|
||||
case <-t.C:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PermanentError signals that the operation should not be retried.
|
||||
type PermanentError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e *PermanentError) Error() string {
|
||||
return e.Err.Error()
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) *PermanentError {
|
||||
return &PermanentError{
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
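Permanent, defined at the end of the file above, is the escape hatch for errors that retrying cannot fix: Retry returns the wrapped error immediately instead of consulting the backoff policy. A short sketch with a hypothetical unrecoverable failure:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		// Hypothetical unrecoverable condition: no point in retrying.
		return backoff.Permanent(errors.New("request rejected: invalid credentials"))
	}

	err := backoff.Retry(op, backoff.NewExponentialBackOff())
	fmt.Println(err)      // the wrapped error, not the *PermanentError itself
	fmt.Println(attempts) // 1 -- the operation ran exactly once
}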
82  vendor/github.com/cenkalti/backoff/ticker.go  generated  vendored  Normal file
@@ -0,0 +1,82 @@
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff.
|
||||
//
|
||||
// Ticks will continue to arrive when the previous operation is still running,
|
||||
// so operations that take a while to fail could run in quick succession.
|
||||
type Ticker struct {
|
||||
C <-chan time.Time
|
||||
c chan time.Time
|
||||
b BackOffContext
|
||||
stop chan struct{}
|
||||
stopOnce sync.Once
|
||||
}
|
||||
|
||||
// NewTicker returns a new Ticker containing a channel that will send
|
||||
// the time at times specified by the BackOff argument. Ticker is
|
||||
// guaranteed to tick at least once. The channel is closed when Stop
|
||||
// method is called or BackOff stops. It is not safe to manipulate the
|
||||
// provided backoff policy (notably calling NextBackOff or Reset)
|
||||
// while the ticker is running.
|
||||
func NewTicker(b BackOff) *Ticker {
|
||||
c := make(chan time.Time)
|
||||
t := &Ticker{
|
||||
C: c,
|
||||
c: c,
|
||||
b: ensureContext(b),
|
||||
stop: make(chan struct{}),
|
||||
}
|
||||
t.b.Reset()
|
||||
go t.run()
|
||||
return t
|
||||
}
|
||||
|
||||
// Stop turns off a ticker. After Stop, no more ticks will be sent.
|
||||
func (t *Ticker) Stop() {
|
||||
t.stopOnce.Do(func() { close(t.stop) })
|
||||
}
|
||||
|
||||
func (t *Ticker) run() {
|
||||
c := t.c
|
||||
defer close(c)
|
||||
|
||||
// Ticker is guaranteed to tick at least once.
|
||||
afterC := t.send(time.Now())
|
||||
|
||||
for {
|
||||
if afterC == nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case tick := <-afterC:
|
||||
afterC = t.send(tick)
|
||||
case <-t.stop:
|
||||
t.c = nil // Prevent future ticks from being sent to the channel.
|
||||
return
|
||||
case <-t.b.Context().Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Ticker) send(tick time.Time) <-chan time.Time {
|
||||
select {
|
||||
case t.c <- tick:
|
||||
case <-t.stop:
|
||||
return nil
|
||||
}
|
||||
|
||||
next := t.b.NextBackOff()
|
||||
if next == Stop {
|
||||
t.Stop()
|
||||
return nil
|
||||
}
|
||||
|
||||
return time.After(next)
|
||||
}
|
||||
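The Ticker above is the channel-based alternative to Retry: each receive on C is a cue to attempt the operation again. A minimal sketch with a placeholder operation that succeeds on the third try:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	tries := 0
	op := func() error { // placeholder operation
		tries++
		if tries < 3 {
			return errors.New("not yet")
		}
		return nil
	}

	// The channel closes when Stop is called or the policy returns backoff.Stop.
	for range ticker.C {
		if err := op(); err != nil {
			fmt.Println("retrying after:", err)
			continue
		}
		fmt.Println("succeeded")
		break
	}
}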
35  vendor/github.com/cenkalti/backoff/tries.go  generated  vendored  Normal file
@@ -0,0 +1,35 @@
|
||||
package backoff
|
||||
|
||||
import "time"
|
||||
|
||||
/*
|
||||
WithMaxRetries creates a wrapper around another BackOff, which will
|
||||
return Stop if NextBackOff() has been called too many times since
|
||||
the last time Reset() was called
|
||||
|
||||
Note: Implementation is not thread-safe.
|
||||
*/
|
||||
func WithMaxRetries(b BackOff, max uint64) BackOff {
|
||||
return &backOffTries{delegate: b, maxTries: max}
|
||||
}
|
||||
|
||||
type backOffTries struct {
|
||||
delegate BackOff
|
||||
maxTries uint64
|
||||
numTries uint64
|
||||
}
|
||||
|
||||
func (b *backOffTries) NextBackOff() time.Duration {
|
||||
if b.maxTries > 0 {
|
||||
if b.maxTries <= b.numTries {
|
||||
return Stop
|
||||
}
|
||||
b.numTries++
|
||||
}
|
||||
return b.delegate.NextBackOff()
|
||||
}
|
||||
|
||||
func (b *backOffTries) Reset() {
|
||||
b.numTries = 0
|
||||
b.delegate.Reset()
|
||||
}
|
||||
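WithMaxRetries, just above, caps the number of NextBackOff calls regardless of the underlying policy. A small sketch counting attempts against a cap of 3 retries:

package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	op := func() error { // always fails, so the cap decides when to stop
		attempts++
		return errors.New("transient failure")
	}

	// At most 3 retries after the initial attempt, i.e. 4 runs in total.
	policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 3)

	err := backoff.Retry(op, policy)
	fmt.Println(err, attempts) // "transient failure" 4
}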
9  vendor/github.com/containous/staert/toml.go  generated  vendored
@@ -47,10 +47,7 @@ func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
		return nil, err
	}

	flgArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
	if err != nil {
		return nil, err
	}
	flgArgs, hasUnderField := generateArgs(metadata, boolFlags)

	err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flgArgs)
	if err != nil && err != flaeg.ErrParserNotFound {
@@ -89,7 +86,7 @@ func findFile(filename string, dirNFile []string) string {
	return ""
}

func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool) {
	var flgArgs []string
	keys := metadata.Keys()
	hasUnderField := false
@@ -117,5 +114,5 @@ func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error
		}
	}

	return flgArgs, hasUnderField, nil
	return flgArgs, hasUnderField
}
11  vendor/github.com/containous/traefik-extra-service-fabric/servicefabric.go  generated  vendored
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/containous/traefik/provider/label"
|
||||
"github.com/containous/traefik/safe"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/jjcollinge/logrus-appinsights"
|
||||
appinsights "github.com/jjcollinge/logrus-appinsights"
|
||||
sf "github.com/jjcollinge/servicefabric"
|
||||
)
|
||||
|
||||
@@ -164,11 +164,12 @@ func getClusterServices(sfClient sfClient) ([]ServiceItemExtended, error) {
|
||||
for _, partition := range partitions.Items {
|
||||
partitionExt := PartitionItemExtended{PartitionItem: partition}
|
||||
|
||||
if isStateful(item) {
|
||||
switch {
|
||||
case isStateful(item):
|
||||
partitionExt.Replicas = getValidReplicas(sfClient, app, service, partition)
|
||||
} else if isStateless(item) {
|
||||
case isStateless(item):
|
||||
partitionExt.Instances = getValidInstances(sfClient, app, service, partition)
|
||||
} else {
|
||||
default:
|
||||
log.Errorf("Unsupported service kind %s in service %s", partition.ServiceKind, service.Name)
|
||||
continue
|
||||
}
|
||||
@@ -291,7 +292,7 @@ func getLabels(sfClient sfClient, service *sf.ServiceItem, app *sf.ApplicationIt
|
||||
}
|
||||
|
||||
func createAppInsightsHook(appInsightsClientName string, instrumentationKey string, maxBatchSize int, interval flaeg.Duration) {
|
||||
hook, err := logrus_appinsights.New(appInsightsClientName, logrus_appinsights.Config{
|
||||
hook, err := appinsights.New(appInsightsClientName, appinsights.Config{
|
||||
InstrumentationKey: instrumentationKey,
|
||||
MaxBatchSize: maxBatchSize, // optional
|
||||
MaxBatchInterval: time.Duration(interval), // optional
|
||||
|
||||
5  vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_config.go  generated  vendored
@@ -48,6 +48,7 @@ func (p *Provider) buildConfiguration(services []ServiceItemExtended) (*types.Co
		"getWhiteList": getWhiteList,
		"getHeaders": getHeaders,
		"getRedirect": getRedirect,
		"getErrorPages": getErrorPages,

		// SF Service Grouping
		"getGroupedServices": getFuncServicesGroupedByLabel(traefikSFGroupName),
@@ -172,3 +173,7 @@ func getCircuitBreaker(service ServiceItemExtended) *types.CircuitBreaker {
func getLoadBalancer(service ServiceItemExtended) *types.LoadBalancer {
	return label.GetLoadBalancer(service.Labels)
}

func getErrorPages(service ServiceItemExtended) map[string]*types.ErrorPage {
	return label.GetErrorPages(service.Labels)
}
56  vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_tmpl.go  generated  vendored
@@ -8,8 +8,13 @@ const tmpl = `
|
||||
{{range $partition := $service.Partitions }}
|
||||
{{range $instance := $partition.Instances }}
|
||||
[backends."{{ $aggName }}".servers."{{ $service.ID }}-{{ $instance.ID }}"]
|
||||
url = "{{ getDefaultEndpoint $instance }}"
|
||||
weight = {{ getGroupedWeight $service }}
|
||||
{{ $endpointName := getLabelValue $service "traefik.servicefabric.endpointname" "" }}
|
||||
{{if $endpointName }}
|
||||
url = "{{ getNamedEndpoint $instance $endpointName }}"
|
||||
{{else}}
|
||||
url = "{{ getDefaultEndpoint $instance }}"
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -65,8 +70,13 @@ const tmpl = `
|
||||
|
||||
{{range $instance := $partition.Instances}}
|
||||
[backends."{{ $service.Name }}".servers."{{ $instance.ID }}"]
|
||||
url = "{{ getDefaultEndpoint $instance }}"
|
||||
weight = {{ getWeight $service }}
|
||||
{{ $endpointName := getLabelValue $service "traefik.servicefabric.endpointname" "" }}
|
||||
{{if $endpointName }}
|
||||
url = "{{ getNamedEndpoint $instance $endpointName }}"
|
||||
{{else}}
|
||||
url = "{{ getDefaultEndpoint $instance }}"
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
{{else if isStateful $service}}
|
||||
@@ -75,11 +85,16 @@ const tmpl = `
|
||||
{{if isPrimary $replica}}
|
||||
{{ $backendName := getBackendName $service $partition }}
|
||||
[backends."{{ $backendName }}".servers."{{ $replica.ID }}"]
|
||||
url = "{{ getDefaultEndpoint $replica }}"
|
||||
weight = 1
|
||||
weight = 1
|
||||
{{ $endpointName := getLabelValue $service "traefik.servicefabric.endpointname" "" }}
|
||||
{{if $endpointName }}
|
||||
url = "{{ getNamedEndpoint $replica $endpointName }}"
|
||||
{{else}}
|
||||
url = "{{ getDefaultEndpoint $replica }}"
|
||||
{{end}}
|
||||
|
||||
[backends."{{$backendName}}".LoadBalancer]
|
||||
method = "drr"
|
||||
[backends."{{$backendName}}".LoadBalancer]
|
||||
method = "drr"
|
||||
|
||||
{{end}}
|
||||
{{end}}
|
||||
@@ -114,14 +129,14 @@ const tmpl = `
|
||||
passHostHeader = {{ getPassHostHeader $service }}
|
||||
passTLSCert = {{ getPassTLSCert $service }}
|
||||
priority = {{ getPriority $service }}
|
||||
|
||||
|
||||
{{ $entryPoints := getEntryPoints $service }}
|
||||
{{if $entryPoints }}
|
||||
entryPoints = [{{range $entryPoints }}
|
||||
"{{.}}",
|
||||
{{end}}]
|
||||
{{end}}
|
||||
|
||||
|
||||
{{ $basicAuth := getBasicAuth $service }}
|
||||
{{if $basicAuth }}
|
||||
basicAuth = [{{range $basicAuth }}
|
||||
@@ -147,6 +162,19 @@ const tmpl = `
|
||||
permanent = {{ $redirect.Permanent }}
|
||||
{{end}}
|
||||
|
||||
{{ $errorPages := getErrorPages $service }}
|
||||
{{if $errorPages }}
|
||||
[frontends."frontend-{{ $frontendName }}".errors]
|
||||
{{range $pageName, $page := $errorPages }}
|
||||
[frontends."frontend-{{ $frontendName }}".errors."{{ $pageName }}"]
|
||||
status = [{{range $page.Status }}
|
||||
"{{.}}",
|
||||
{{end}}]
|
||||
backend = "{{ $page.Backend }}"
|
||||
query = "{{ $page.Query }}"
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
{{ $headers := getHeaders $service }}
|
||||
{{if $headers }}
|
||||
[frontends."frontend-{{ $frontendName }}".headers]
|
||||
@@ -166,33 +194,33 @@ const tmpl = `
|
||||
PublicKey = "{{ $headers.PublicKey }}"
|
||||
ReferrerPolicy = "{{ $headers.ReferrerPolicy }}"
|
||||
IsDevelopment = {{ $headers.IsDevelopment }}
|
||||
|
||||
|
||||
{{if $headers.AllowedHosts }}
|
||||
AllowedHosts = [{{range $headers.AllowedHosts }}
|
||||
"{{.}}",
|
||||
{{end}}]
|
||||
{{end}}
|
||||
|
||||
|
||||
{{if $headers.HostsProxyHeaders }}
|
||||
HostsProxyHeaders = [{{range $headers.HostsProxyHeaders }}
|
||||
"{{.}}",
|
||||
{{end}}]
|
||||
{{end}}
|
||||
|
||||
|
||||
{{if $headers.CustomRequestHeaders }}
|
||||
[frontends."frontend-{{ $frontendName }}".headers.customRequestHeaders]
|
||||
{{range $k, $v := $headers.CustomRequestHeaders }}
|
||||
{{$k}} = "{{$v}}"
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
|
||||
{{if $headers.CustomResponseHeaders }}
|
||||
[frontends."frontend-{{ $frontendName }}".headers.customResponseHeaders]
|
||||
{{range $k, $v := $headers.CustomResponseHeaders }}
|
||||
{{$k}} = "{{$v}}"
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
|
||||
{{if $headers.SSLProxyHeaders }}
|
||||
[frontends."frontend-{{ $frontendName }}".headers.SSLProxyHeaders]
|
||||
{{range $k, $v := $headers.SSLProxyHeaders }}
|
||||
@@ -200,7 +228,7 @@ const tmpl = `
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
|
||||
{{range $key, $value := getFrontendRules $service }}
|
||||
[frontends."frontend-{{ $frontendName }}".routes."{{ $key }}"]
|
||||
rule = "{{ $value }}"
|
||||
|
||||
105  vendor/github.com/exoscale/egoscale/accounts.go  generated  vendored
@@ -1,32 +1,12 @@
|
||||
package egoscale
|
||||
|
||||
import "fmt"
|
||||
|
||||
// AccountType represents the type of an Account
|
||||
//
|
||||
// http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/4.8/accounts.html#accounts-users-and-domains
|
||||
type AccountType int16
|
||||
|
||||
//go:generate stringer -type AccountType
|
||||
const (
|
||||
// UserAccount represents a User
|
||||
UserAccount AccountType = 0
|
||||
// AdminAccount represents an Admin
|
||||
AdminAccount AccountType = 1
|
||||
// DomainAdminAccount represents a Domain Admin
|
||||
DomainAdminAccount AccountType = 2
|
||||
)
|
||||
|
||||
// Account provides the detailed account information
|
||||
type Account struct {
|
||||
AccountDetails map[string]string `json:"accountdetails,omitempty" doc:"details for the account"`
|
||||
AccountType AccountType `json:"accounttype,omitempty" doc:"account type (admin, domain-admin, user)"`
|
||||
CPUAvailable string `json:"cpuavailable,omitempty" doc:"the total number of cpu cores available to be created for this account"`
|
||||
CPULimit string `json:"cpulimit,omitempty" doc:"the total number of cpu cores the account can own"`
|
||||
CPUTotal int64 `json:"cputotal,omitempty" doc:"the total number of cpu cores owned by account"`
|
||||
DefaultZoneID *UUID `json:"defaultzoneid,omitempty" doc:"the default zone of the account"`
|
||||
Domain string `json:"domain,omitempty" doc:"name of the Domain the account belongs too"`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"id of the Domain the account belongs too"`
|
||||
EipLimit string `json:"eiplimit,omitempty" doc:"the total number of public elastic ip addresses this account can acquire"`
|
||||
Groups []string `json:"groups,omitempty" doc:"the list of acl groups that account belongs to"`
|
||||
ID *UUID `json:"id,omitempty" doc:"the id of the account"`
|
||||
@@ -74,55 +54,23 @@ type Account struct {
|
||||
// ListRequest builds the ListAccountsGroups request
|
||||
func (a Account) ListRequest() (ListCommand, error) {
|
||||
return &ListAccounts{
|
||||
ID: a.ID,
|
||||
DomainID: a.DomainID,
|
||||
AccountType: a.AccountType,
|
||||
State: a.State,
|
||||
ID: a.ID,
|
||||
State: a.State,
|
||||
}, nil
|
||||
}
|
||||
|
||||
//go:generate go run generate/main.go -interface=Listable ListAccounts
|
||||
|
||||
// ListAccounts represents a query to display the accounts
|
||||
type ListAccounts struct {
|
||||
AccountType AccountType `json:"accounttype,omitempty" doc:"list accounts by account type. Valid account types are 1 (admin), 2 (domain-admin), and 0 (user)."`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"list only resources belonging to the domain specified"`
|
||||
ID *UUID `json:"id,omitempty" doc:"list account by account ID"`
|
||||
IsCleanUpRequired *bool `json:"iscleanuprequired,omitempty" doc:"list accounts by cleanuprequired attribute (values are true or false)"`
|
||||
IsRecursive *bool `json:"isrecursive,omitempty" doc:"defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
ListAll *bool `json:"listall,omitempty" doc:"If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"`
|
||||
Name string `json:"name,omitempty" doc:"list account by account name"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
State string `json:"state,omitempty" doc:"list accounts by state. Valid states are enabled, disabled, and locked."`
|
||||
_ bool `name:"listAccounts" description:"Lists accounts and provides detailed account information for listed accounts"`
|
||||
}
|
||||
|
||||
func (ListAccounts) response() interface{} {
|
||||
return new(ListAccountsResponse)
|
||||
}
|
||||
|
||||
// SetPage sets the current page
|
||||
func (ls *ListAccounts) SetPage(page int) {
|
||||
ls.Page = page
|
||||
}
|
||||
|
||||
// SetPageSize sets the page size
|
||||
func (ls *ListAccounts) SetPageSize(pageSize int) {
|
||||
ls.PageSize = pageSize
|
||||
}
|
||||
|
||||
func (ListAccounts) each(resp interface{}, callback IterateItemFunc) {
|
||||
vms, ok := resp.(*ListAccountsResponse)
|
||||
if !ok {
|
||||
callback(nil, fmt.Errorf("wrong type. ListAccountsResponse expected, got %T", resp))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range vms.Account {
|
||||
if !callback(&vms.Account[i], nil) {
|
||||
break
|
||||
}
|
||||
}
|
||||
ID *UUID `json:"id,omitempty" doc:"List account by account ID"`
|
||||
IsCleanUpRequired *bool `json:"iscleanuprequired,omitempty" doc:"list accounts by cleanuprequired attribute (values are true or false)"`
|
||||
Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
|
||||
Name string `json:"name,omitempty" doc:"List account by account name"`
|
||||
Page int `json:"page,omitempty"`
|
||||
PageSize int `json:"pagesize,omitempty"`
|
||||
State string `json:"state,omitempty" doc:"List accounts by state. Valid states are enabled, disabled, and locked."`
|
||||
_ bool `name:"listAccounts" description:"Lists accounts and provides detailed account information for listed accounts"`
|
||||
}
|
||||
|
||||
// ListAccountsResponse represents a list of accounts
|
||||
@@ -130,32 +78,3 @@ type ListAccountsResponse struct {
|
||||
Count int `json:"count"`
|
||||
Account []Account `json:"account"`
|
||||
}
|
||||
|
||||
// EnableAccount represents the activation of an account
|
||||
type EnableAccount struct {
|
||||
Account string `json:"account,omitempty" doc:"Enables specified account."`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"Enables specified account in this domain."`
|
||||
ID *UUID `json:"id,omitempty" doc:"Account id"`
|
||||
_ bool `name:"enableAccount" description:"Enables an account"`
|
||||
}
|
||||
|
||||
func (EnableAccount) response() interface{} {
|
||||
return new(Account)
|
||||
}
|
||||
|
||||
// DisableAccount (Async) represents the deactivation of an account
|
||||
type DisableAccount struct {
|
||||
Lock *bool `json:"lock" doc:"If true, only lock the account; else disable the account"`
|
||||
Account string `json:"account,omitempty" doc:"Disables specified account."`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"Disables specified account in this domain."`
|
||||
ID *UUID `json:"id,omitempty" doc:"Account id"`
|
||||
_ bool `name:"disableAccount" description:"Disables an account"`
|
||||
}
|
||||
|
||||
func (DisableAccount) response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
func (DisableAccount) asyncResponse() interface{} {
|
||||
return new(Account)
|
||||
}
|
||||
|
||||
43  vendor/github.com/exoscale/egoscale/accounts_response.go  generated  vendored  Normal file
@@ -0,0 +1,43 @@
|
||||
// code generated; DO NOT EDIT.
|
||||
|
||||
package egoscale
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Response returns the struct to unmarshal
|
||||
func (ListAccounts) Response() interface{} {
|
||||
return new(ListAccountsResponse)
|
||||
}
|
||||
|
||||
// ListRequest returns itself
|
||||
func (ls *ListAccounts) ListRequest() (ListCommand, error) {
|
||||
if ls == nil {
|
||||
return nil, fmt.Errorf("%T cannot be nil", ls)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
// SetPage sets the current apge
|
||||
func (ls *ListAccounts) SetPage(page int) {
|
||||
ls.Page = page
|
||||
}
|
||||
|
||||
// SetPageSize sets the page size
|
||||
func (ls *ListAccounts) SetPageSize(pageSize int) {
|
||||
ls.PageSize = pageSize
|
||||
}
|
||||
|
||||
// Each triggers the callback for each, valid answer or any non 404 issue
|
||||
func (ListAccounts) Each(resp interface{}, callback IterateItemFunc) {
|
||||
items, ok := resp.(*ListAccountsResponse)
|
||||
if !ok {
|
||||
callback(nil, fmt.Errorf("wrong type, ListAccountsResponse was expected, got %T", resp))
|
||||
return
|
||||
}
|
||||
|
||||
for i := range items.Account {
|
||||
if !callback(&items.Account[i], nil) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
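The generated file above gives ListAccounts the Listable plumbing (Response, ListRequest, SetPage, SetPageSize, Each). A sketch of how Each walks a decoded response; the response value is built by hand purely for illustration, and the callback signature follows the calls visible in the generated code:

package main

import (
	"fmt"

	"github.com/exoscale/egoscale"
)

func main() {
	// Normally this value comes back from the API client; it is constructed
	// here only to show the callback-driven iteration.
	resp := &egoscale.ListAccountsResponse{
		Count: 2,
		Account: []egoscale.Account{
			{Domain: "prod.example"},    // illustrative values
			{Domain: "staging.example"},
		},
	}

	var cmd egoscale.ListAccounts
	cmd.Each(resp, func(item interface{}, err error) bool {
		if err != nil {
			fmt.Println("iteration error:", err)
			return false // stop iterating
		}
		acc := item.(*egoscale.Account)
		fmt.Println("account domain:", acc.Domain)
		return true // keep going
	})
}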
16  vendor/github.com/exoscale/egoscale/accounttype_string.go  generated  vendored
@@ -1,16 +0,0 @@
// Code generated by "stringer -type AccountType"; DO NOT EDIT.

package egoscale

import "strconv"

const _AccountType_name = "UserAccountAdminAccountDomainAdminAccount"

var _AccountType_index = [...]uint8{0, 11, 23, 41}

func (i AccountType) String() string {
	if i < 0 || i >= AccountType(len(_AccountType_index)-1) {
		return "AccountType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _AccountType_name[_AccountType_index[i]:_AccountType_index[i+1]]
}
83  vendor/github.com/exoscale/egoscale/addresses.go  generated  vendored
@@ -8,14 +8,10 @@ import (
|
||||
|
||||
// IPAddress represents an IP Address
|
||||
type IPAddress struct {
|
||||
Account string `json:"account,omitempty" doc:"the account the public IP address is associated with"`
|
||||
Allocated string `json:"allocated,omitempty" doc:"date the public IP address was acquired"`
|
||||
Associated string `json:"associated,omitempty" doc:"date the public IP address was associated"`
|
||||
AssociatedNetworkID *UUID `json:"associatednetworkid,omitempty" doc:"the ID of the Network associated with the IP address"`
|
||||
AssociatedNetworkName string `json:"associatednetworkname,omitempty" doc:"the name of the Network associated with the IP address"`
|
||||
Domain string `json:"domain,omitempty" doc:"the domain the public IP address is associated with"`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"the domain ID the public IP address is associated with"`
|
||||
ForDisplay bool `json:"fordisplay,omitempty" doc:"is public ip for display to the regular user"`
|
||||
ForVirtualNetwork bool `json:"forvirtualnetwork,omitempty" doc:"the virtual network for the IP address"`
|
||||
ID *UUID `json:"id,omitempty" doc:"public IP address id"`
|
||||
IPAddress net.IP `json:"ipaddress,omitempty" doc:"public IP address"`
|
||||
@@ -48,9 +44,7 @@ func (IPAddress) ResourceType() string {
|
||||
// ListRequest builds the ListAdresses request
|
||||
func (ipaddress IPAddress) ListRequest() (ListCommand, error) {
|
||||
req := &ListPublicIPAddresses{
|
||||
Account: ipaddress.Account,
|
||||
AssociatedNetworkID: ipaddress.AssociatedNetworkID,
|
||||
DomainID: ipaddress.DomainID,
|
||||
ID: ipaddress.ID,
|
||||
IPAddress: ipaddress.IPAddress,
|
||||
PhysicalNetworkID: ipaddress.PhysicalNetworkID,
|
||||
@@ -63,9 +57,6 @@ func (ipaddress IPAddress) ListRequest() (ListCommand, error) {
|
||||
if ipaddress.IsSourceNat {
|
||||
req.IsSourceNat = &ipaddress.IsSourceNat
|
||||
}
|
||||
if ipaddress.ForDisplay {
|
||||
req.ForDisplay = &ipaddress.ForDisplay
|
||||
}
|
||||
if ipaddress.ForVirtualNetwork {
|
||||
req.ForVirtualNetwork = &ipaddress.ForVirtualNetwork
|
||||
}
|
||||
@@ -86,21 +77,19 @@ func (ipaddress IPAddress) Delete(ctx context.Context, client *Client) error {
|
||||
|
||||
// AssociateIPAddress (Async) represents the IP creation
|
||||
type AssociateIPAddress struct {
|
||||
Account string `json:"account,omitempty" doc:"the account to associate with this IP address"`
|
||||
DomainID *UUID `json:"domainid,omitempty" doc:"the ID of the domain to associate with this IP address"`
|
||||
ForDisplay *bool `json:"fordisplay,omitempty" doc:"an optional field, whether to the display the ip to the end user or not"`
|
||||
IsPortable *bool `json:"isportable,omitempty" doc:"should be set to true if public IP is required to be transferable across zones, if not specified defaults to false"`
|
||||
NetworkdID *UUID `json:"networkid,omitempty" doc:"The network this ip address should be associated to."`
|
||||
RegionID int `json:"regionid,omitempty" doc:"region ID from where portable ip is to be associated."`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"the ID of the availability zone you want to acquire an public IP address from"`
|
||||
_ bool `name:"associateIpAddress" description:"Acquires and associates a public IP to an account."`
|
||||
IsPortable *bool `json:"isportable,omitempty" doc:"should be set to true if public IP is required to be transferable across zones, if not specified defaults to false"`
|
||||
NetworkdID *UUID `json:"networkid,omitempty" doc:"The network this ip address should be associated to."`
|
||||
ZoneID *UUID `json:"zoneid,omitempty" doc:"the ID of the availability zone you want to acquire an public IP address from"`
|
||||
_ bool `name:"associateIpAddress" description:"Acquires and associates a public IP to an account."`
|
||||
}
|
||||
|
||||
func (AssociateIPAddress) response() interface{} {
|
||||
// Response returns the struct to unmarshal
|
||||
func (AssociateIPAddress) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
func (AssociateIPAddress) asyncResponse() interface{} {
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
func (AssociateIPAddress) AsyncResponse() interface{} {
|
||||
return new(IPAddress)
|
||||
}
|
||||
|
||||
@@ -110,47 +99,47 @@ type DisassociateIPAddress struct {
|
||||
_ bool `name:"disassociateIpAddress" description:"Disassociates an ip address from the account."`
|
||||
}
|
||||
|
||||
func (DisassociateIPAddress) response() interface{} {
|
||||
// Response returns the struct to unmarshal
|
||||
func (DisassociateIPAddress) Response() interface{} {
|
||||
return new(AsyncJobResult)
|
||||
}
|
||||
|
||||
func (DisassociateIPAddress) asyncResponse() interface{} {
|
||||
return new(booleanResponse)
|
||||
// AsyncResponse returns the struct to unmarshal the async job
|
||||
func (DisassociateIPAddress) AsyncResponse() interface{} {
|
||||
return new(BooleanResponse)
|
||||
}
|
||||
|
||||
// UpdateIPAddress (Async) represents the IP modification
|
||||
type UpdateIPAddress struct {
|
||||
ID *UUID `json:"id" doc:"the id of the public ip address to update"`
|
||||
CustomID *UUID `json:"customid,omitempty" doc:"an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"`
|
||||
    ForDisplay *bool `json:"fordisplay,omitempty" doc:"an optional field, whether to the display the ip to the end user or not"`
    _ bool `name:"updateIpAddress" description:"Updates an ip address"`
    ID *UUID `json:"id" doc:"the id of the public ip address to update"`
    CustomID *UUID `json:"customid,omitempty" doc:"an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"`
    _ bool `name:"updateIpAddress" description:"Updates an ip address"`
}

func (UpdateIPAddress) response() interface{} {
// Response returns the struct to unmarshal
func (UpdateIPAddress) Response() interface{} {
    return new(AsyncJobResult)
}

func (UpdateIPAddress) asyncResponse() interface{} {
// AsyncResponse returns the struct to unmarshal the async job
func (UpdateIPAddress) AsyncResponse() interface{} {
    return new(IPAddress)
}

//go:generate go run generate/main.go -interface=Listable ListPublicIPAddresses

// ListPublicIPAddresses represents a search for public IP addresses
type ListPublicIPAddresses struct {
    Account string `json:"account,omitempty" doc:"list resources by account. Must be used with the domainId parameter."`
    AllocatedOnly *bool `json:"allocatedonly,omitempty" doc:"limits search results to allocated public IP addresses"`
    AssociatedNetworkID *UUID `json:"associatednetworkid,omitempty" doc:"lists all public IP addresses associated to the network specified"`
    DomainID *UUID `json:"domainid,omitempty" doc:"list only resources belonging to the domain specified"`
    ForDisplay *bool `json:"fordisplay,omitempty" doc:"list resources by display flag; only ROOT admin is eligible to pass this parameter"`
    ForLoadBalancing *bool `json:"forloadbalancing,omitempty" doc:"list only ips used for load balancing"`
    ForVirtualNetwork *bool `json:"forvirtualnetwork,omitempty" doc:"the virtual network for the IP address"`
    ID *UUID `json:"id,omitempty" doc:"lists ip address by id"`
    IPAddress net.IP `json:"ipaddress,omitempty" doc:"lists the specified IP address"`
    IsElastic *bool `json:"iselastic,omitempty" doc:"list only elastic ip addresses"`
    IsRecursive *bool `json:"isrecursive,omitempty" doc:"defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."`
    IsSourceNat *bool `json:"issourcenat,omitempty" doc:"list only source nat ip addresses"`
    IsStaticNat *bool `json:"isstaticnat,omitempty" doc:"list only static nat ip addresses"`
    Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
    ListAll *bool `json:"listall,omitempty" doc:"If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"`
    Page int `json:"page,omitempty"`
    PageSize int `json:"pagesize,omitempty"`
    PhysicalNetworkID *UUID `json:"physicalnetworkid,omitempty" doc:"lists all public IP addresses by physical network id"`
@@ -165,31 +154,3 @@ type ListPublicIPAddressesResponse struct {
    Count int `json:"count"`
    PublicIPAddress []IPAddress `json:"publicipaddress"`
}

func (ListPublicIPAddresses) response() interface{} {
    return new(ListPublicIPAddressesResponse)
}

// SetPage sets the current page
func (ls *ListPublicIPAddresses) SetPage(page int) {
    ls.Page = page
}

// SetPageSize sets the page size
func (ls *ListPublicIPAddresses) SetPageSize(pageSize int) {
    ls.PageSize = pageSize
}

func (ListPublicIPAddresses) each(resp interface{}, callback IterateItemFunc) {
    ips, ok := resp.(*ListPublicIPAddressesResponse)
    if !ok {
        callback(nil, fmt.Errorf("wrong type. ListPublicIPAddressesResponse expected, got %T", resp))
        return
    }

    for i := range ips.PublicIPAddress {
        if !callback(&ips.PublicIPAddress[i], nil) {
            break
        }
    }
}
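The response, SetPage, SetPageSize and each methods above are the plumbing the client-side pagination helpers drive; in this change they move to generated *_response.go files and become exported. As a rough illustration of how a caller lists public IP addresses through that machinery, here is a minimal sketch: the endpoint and credentials are placeholders, and it assumes the post-change exported API plus the generated Listable implementation for ListPublicIPAddresses, which is not part of this truncated diff.

```go
package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/exoscale/egoscale"
)

func main() {
    // Placeholder endpoint and credentials.
    client := egoscale.NewClient("https://api.example.com/compute", "API_KEY", "API_SECRET")

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // ListWithContext paginates to the end; each element is what the Each
    // callback yields, i.e. a *egoscale.IPAddress.
    items, err := client.ListWithContext(ctx, &egoscale.ListPublicIPAddresses{})
    if err != nil {
        log.Fatal(err)
    }

    for _, item := range items {
        if ip, ok := item.(*egoscale.IPAddress); ok {
            fmt.Printf("%+v\n", ip)
        }
    }
}
```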
106 vendor/github.com/exoscale/egoscale/affinity_groups.go generated vendored
@@ -11,14 +11,12 @@ import (
// Affinity and Anti-Affinity groups provide a way to influence where VMs should run.
// See: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/stable/virtual_machines.html#affinity-groups
type AffinityGroup struct {
    Account string `json:"account,omitempty" doc:"the account owning the affinity group"`
    Description string `json:"description,omitempty" doc:"the description of the affinity group"`
    Domain string `json:"domain,omitempty" doc:"the domain name of the affinity group"`
    DomainID *UUID `json:"domainid,omitempty" doc:"the domain ID of the affinity group"`
    ID *UUID `json:"id,omitempty" doc:"the ID of the affinity group"`
    Name string `json:"name,omitempty" doc:"the name of the affinity group"`
    Type string `json:"type,omitempty" doc:"the type of the affinity group"`
    VirtualMachineIDs []string `json:"virtualmachineIds,omitempty" doc:"virtual machine Ids associated with this affinity group"`
    Account string `json:"account,omitempty" doc:"the account owning the affinity group"`
    Description string `json:"description,omitempty" doc:"the description of the affinity group"`
    ID *UUID `json:"id,omitempty" doc:"the ID of the affinity group"`
    Name string `json:"name,omitempty" doc:"the name of the affinity group"`
    Type string `json:"type,omitempty" doc:"the type of the affinity group"`
    VirtualMachineIDs []UUID `json:"virtualmachineIds,omitempty" doc:"virtual machine Ids associated with this affinity group"`
}

// ListRequest builds the ListAffinityGroups request
@@ -35,10 +33,7 @@ func (ag AffinityGroup) Delete(ctx context.Context, client *Client) error {
        return fmt.Errorf("an Affinity Group may only be deleted using ID or Name")
    }

    req := &DeleteAffinityGroup{
        Account: ag.Account,
        DomainID: ag.DomainID,
    }
    req := &DeleteAffinityGroup{}

    if ag.ID != nil {
        req.ID = ag.ID
@@ -56,19 +51,27 @@ type AffinityGroupType struct {

// CreateAffinityGroup (Async) represents a new (anti-)affinity group
type CreateAffinityGroup struct {
    Account string `json:"account,omitempty" doc:"an account for the affinity group. Must be used with domainId."`
    Description string `json:"description,omitempty" doc:"optional description of the affinity group"`
    DomainID *UUID `json:"domainid,omitempty" doc:"domainId of the account owning the affinity group"`
    Name string `json:"name" doc:"name of the affinity group"`
    Description string `json:"description,omitempty" doc:"Optional description of the affinity group"`
    Name string `json:"name,omitempty" doc:"Name of the affinity group"`
    Type string `json:"type" doc:"Type of the affinity group from the available affinity/anti-affinity group types"`
    _ bool `name:"createAffinityGroup" description:"Creates an affinity/anti-affinity group"`
}

func (CreateAffinityGroup) response() interface{} {
func (req CreateAffinityGroup) onBeforeSend(params url.Values) error {
    // Name must be set, but can be empty
    if req.Name == "" {
        params.Set("name", "")
    }
    return nil
}

// Response returns the struct to unmarshal
func (CreateAffinityGroup) Response() interface{} {
    return new(AsyncJobResult)
}

func (CreateAffinityGroup) asyncResponse() interface{} {
// AsyncResponse returns the struct to unmarshal the async job
func (CreateAffinityGroup) AsyncResponse() interface{} {
    return new(AffinityGroup)
}

@@ -88,75 +91,47 @@ func (req UpdateVMAffinityGroup) onBeforeSend(params url.Values) error {
    return nil
}

func (UpdateVMAffinityGroup) response() interface{} {
// Response returns the struct to unmarshal
func (UpdateVMAffinityGroup) Response() interface{} {
    return new(AsyncJobResult)
}

func (UpdateVMAffinityGroup) asyncResponse() interface{} {
// AsyncResponse returns the struct to unmarshal the async job
func (UpdateVMAffinityGroup) AsyncResponse() interface{} {
    return new(VirtualMachine)
}

// DeleteAffinityGroup (Async) represents an (anti-)affinity group to be deleted
type DeleteAffinityGroup struct {
    Account string `json:"account,omitempty" doc:"the account of the affinity group. Must be specified with domain ID"`
    DomainID *UUID `json:"domainid,omitempty" doc:"the domain ID of account owning the affinity group"`
    ID *UUID `json:"id,omitempty" doc:"The ID of the affinity group. Mutually exclusive with name parameter"`
    Name string `json:"name,omitempty" doc:"The name of the affinity group. Mutually exclusive with ID parameter"`
    _ bool `name:"deleteAffinityGroup" description:"Deletes affinity group"`
    ID *UUID `json:"id,omitempty" doc:"The ID of the affinity group. Mutually exclusive with name parameter"`
    Name string `json:"name,omitempty" doc:"The name of the affinity group. Mutually exclusive with ID parameter"`
    _ bool `name:"deleteAffinityGroup" description:"Deletes affinity group"`
}

func (DeleteAffinityGroup) response() interface{} {
// Response returns the struct to unmarshal
func (DeleteAffinityGroup) Response() interface{} {
    return new(AsyncJobResult)
}

func (DeleteAffinityGroup) asyncResponse() interface{} {
    return new(booleanResponse)
// AsyncResponse returns the struct to unmarshal the async job
func (DeleteAffinityGroup) AsyncResponse() interface{} {
    return new(BooleanResponse)
}

//go:generate go run generate/main.go -interface=Listable ListAffinityGroups

// ListAffinityGroups represents an (anti-)affinity groups search
type ListAffinityGroups struct {
    Account string `json:"account,omitempty" doc:"list resources by account. Must be used with the domainId parameter."`
    DomainID *UUID `json:"domainid,omitempty" doc:"list only resources belonging to the domain specified"`
    ID *UUID `json:"id,omitempty" doc:"list the affinity group by the ID provided"`
    IsRecursive *bool `json:"isrecursive,omitempty" doc:"defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."`
    ID *UUID `json:"id,omitempty" doc:"List the affinity group by the ID provided"`
    Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
    ListAll *bool `json:"listall,omitempty" doc:"If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"`
    Name string `json:"name,omitempty" doc:"lists affinity groups by name"`
    Name string `json:"name,omitempty" doc:"Lists affinity groups by name"`
    Page int `json:"page,omitempty"`
    PageSize int `json:"pagesize,omitempty"`
    Type string `json:"type,omitempty" doc:"lists affinity groups by type"`
    VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"lists affinity groups by virtual machine ID"`
    Type string `json:"type,omitempty" doc:"Lists affinity groups by type"`
    VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"Lists affinity groups by virtual machine ID"`
    _ bool `name:"listAffinityGroups" description:"Lists affinity groups"`
}

func (ListAffinityGroups) response() interface{} {
    return new(ListAffinityGroupsResponse)
}

// SetPage sets the current page
func (ls *ListAffinityGroups) SetPage(page int) {
    ls.Page = page
}

// SetPageSize sets the page size
func (ls *ListAffinityGroups) SetPageSize(pageSize int) {
    ls.PageSize = pageSize
}

func (ListAffinityGroups) each(resp interface{}, callback IterateItemFunc) {
    vms, ok := resp.(*ListAffinityGroupsResponse)
    if !ok {
        callback(nil, fmt.Errorf("wrong type. ListAffinityGroupsResponse expected, got %T", resp))
        return
    }

    for i := range vms.AffinityGroup {
        if !callback(&vms.AffinityGroup[i], nil) {
            break
        }
    }
}

// ListAffinityGroupsResponse represents a list of (anti-)affinity groups
type ListAffinityGroupsResponse struct {
    Count int `json:"count"`
@@ -171,7 +146,8 @@ type ListAffinityGroupTypes struct {
    _ bool `name:"listAffinityGroupTypes" description:"Lists affinity group types available"`
}

func (ListAffinityGroupTypes) response() interface{} {
// Response returns the struct to unmarshal
func (ListAffinityGroupTypes) Response() interface{} {
    return new(ListAffinityGroupTypesResponse)
}
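The response()/asyncResponse() to Response()/AsyncResponse() renames above are what the exported Client.Response helper (see the client.go hunks further down) dispatches on: the immediate answer of an async command is an AsyncJobResult, while the finished job carries the final resource. A small illustrative sketch of that dispatch; the values are placeholders and nothing here talks to the API.

```go
package main

import (
    "fmt"

    "github.com/exoscale/egoscale"
)

func main() {
    // Placeholder endpoint and credentials; Response performs no HTTP call.
    client := egoscale.NewClient("https://api.example.com/compute", "API_KEY", "API_SECRET")

    cmd := &egoscale.CreateAffinityGroup{
        Name: "web-anti-affinity", // illustrative values
        Type: "host anti-affinity",
    }

    // The immediate answer of an async command unmarshals into an AsyncJobResult...
    fmt.Printf("%T\n", cmd.Response()) // *egoscale.AsyncJobResult

    // ...while Client.Response, seeing an AsyncCommand, returns the struct the
    // finished job unmarshals into, here the created AffinityGroup.
    fmt.Printf("%T\n", client.Response(cmd)) // *egoscale.AffinityGroup
}
```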
43 vendor/github.com/exoscale/egoscale/affinitygroups_response.go generated vendored Normal file
@@ -0,0 +1,43 @@
// code generated; DO NOT EDIT.

package egoscale

import "fmt"

// Response returns the struct to unmarshal
func (ListAffinityGroups) Response() interface{} {
    return new(ListAffinityGroupsResponse)
}

// ListRequest returns itself
func (ls *ListAffinityGroups) ListRequest() (ListCommand, error) {
    if ls == nil {
        return nil, fmt.Errorf("%T cannot be nil", ls)
    }
    return ls, nil
}

// SetPage sets the current page
func (ls *ListAffinityGroups) SetPage(page int) {
    ls.Page = page
}

// SetPageSize sets the page size
func (ls *ListAffinityGroups) SetPageSize(pageSize int) {
    ls.PageSize = pageSize
}

// Each triggers the callback for each valid answer or any non-404 issue
func (ListAffinityGroups) Each(resp interface{}, callback IterateItemFunc) {
    items, ok := resp.(*ListAffinityGroupsResponse)
    if !ok {
        callback(nil, fmt.Errorf("wrong type, ListAffinityGroupsResponse was expected, got %T", resp))
        return
    }

    for i := range items.AffinityGroup {
        if !callback(&items.AffinityGroup[i], nil) {
            break
        }
    }
}
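With Response, ListRequest and Each exported as above, ListAffinityGroups satisfies Listable, so the reworked Client.Get (also part of this diff) can return the matching element directly instead of copying it into a caller-supplied struct. A hedged sketch, assuming a configured client and a group name that exists; the helper name and values are illustrative.

```go
package main

import (
    "fmt"

    "github.com/exoscale/egoscale"
)

// affinityGroupByName is a hypothetical helper that looks up one affinity group
// by name through the new Get(Listable) (interface{}, error) signature.
func affinityGroupByName(client *egoscale.Client, name string) (*egoscale.AffinityGroup, error) {
    resp, err := client.Get(&egoscale.ListAffinityGroups{Name: name})
    if err != nil {
        return nil, err // covers the "not found" ErrorResponse case
    }

    // Each yields *AffinityGroup items, so that is what Get hands back.
    group, ok := resp.(*egoscale.AffinityGroup)
    if !ok {
        return nil, fmt.Errorf("unexpected type %T", resp)
    }
    return group, nil
}

func main() {
    // Placeholder endpoint and credentials.
    client := egoscale.NewClient("https://api.example.com/compute", "API_KEY", "API_SECRET")
    if group, err := affinityGroupByName(client, "web-anti-affinity"); err == nil {
        fmt.Printf("%+v\n", group)
    }
}
```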
5 vendor/github.com/exoscale/egoscale/apis.go generated vendored
@@ -3,7 +3,7 @@ package egoscale
// API represents an API service
type API struct {
    Description string `json:"description,omitempty" doc:"description of the api"`
    IsAsync bool `json:"isasync,omitempty" doc:"true if api is asynchronous"`
    IsAsync bool `json:"isasync" doc:"true if api is asynchronous"`
    Name string `json:"name,omitempty" doc:"the name of the api command"`
    Related string `json:"related,omitempty" doc:"comma separated related apis"`
    Since string `json:"since,omitempty" doc:"version of CloudStack the api was introduced in"`
@@ -42,6 +42,7 @@ type ListAPIsResponse struct {
    API []API `json:"api"`
}

func (*ListAPIs) response() interface{} {
// Response returns the struct to unmarshal
func (*ListAPIs) Response() interface{} {
    return new(ListAPIsResponse)
}
39 vendor/github.com/exoscale/egoscale/async_jobs.go generated vendored
@@ -10,7 +10,7 @@ type AsyncJobResult struct {
    AccountID *UUID `json:"accountid,omitempty" doc:"the account that executed the async command"`
    Cmd string `json:"cmd,omitempty" doc:"the async command executed"`
    Created string `json:"created,omitempty" doc:"the created date of the job"`
    JobID *UUID `json:"jobid,omitempty" doc:"extra field for the initial async call"`
    JobID *UUID `json:"jobid" doc:"extra field for the initial async call"`
    JobInstanceID *UUID `json:"jobinstanceid,omitempty" doc:"the unique ID of the instance/entity object related to the job"`
    JobInstanceType string `json:"jobinstancetype,omitempty" doc:"the instance/entity object related to the job"`
    JobProcStatus int `json:"jobprocstatus,omitempty" doc:"the progress information of the PENDING job"`
@@ -21,6 +21,16 @@ type AsyncJobResult struct {
    UserID *UUID `json:"userid,omitempty" doc:"the user that executed the async command"`
}

// ListRequest builds the (empty) ListAsyncJobs request
func (a AsyncJobResult) ListRequest() (ListCommand, error) {
    req := &ListAsyncJobs{
        StartDate: a.Created,
    }

    return req, nil
}

// Error builds an error message from the result
func (a AsyncJobResult) Error() error {
    r := new(ErrorResponse)
    if e := json.Unmarshal(*a.JobResult, r); e != nil {
@@ -35,31 +45,26 @@ type QueryAsyncJobResult struct {
    _ bool `name:"queryAsyncJobResult" description:"Retrieves the current status of asynchronous job."`
}

func (QueryAsyncJobResult) response() interface{} {
// Response returns the struct to unmarshal
func (QueryAsyncJobResult) Response() interface{} {
    return new(AsyncJobResult)
}

//go:generate go run generate/main.go -interface=Listable ListAsyncJobs

// ListAsyncJobs lists the asynchronous jobs
type ListAsyncJobs struct {
    Account string `json:"account,omitempty" doc:"list resources by account. Must be used with the domainId parameter."`
    DomainID *UUID `json:"domainid,omitempty" doc:"list only resources belonging to the domain specified"`
    IsRecursive *bool `json:"isrecursive,omitempty" doc:"defaults to false, but if true, lists all resources from the parent specified by the domainId till leaves."`
    Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
    ListAll *bool `json:"listall,omitempty" doc:"If set to false, list only resources belonging to the command's caller; if set to true - list resources that the caller is authorized to see. Default value is false"`
    Page int `json:"page,omitempty"`
    PageSize int `json:"pagesize,omitempty"`
    StartDate string `json:"startdate,omitempty" doc:"the start date of the async job"`
    _ bool `name:"listAsyncJobs" description:"Lists all pending asynchronous jobs for the account."`
    Keyword string `json:"keyword,omitempty" doc:"List by keyword"`
    Page int `json:"page,omitempty"`
    PageSize int `json:"pagesize,omitempty"`
    StartDate string `json:"startdate,omitempty" doc:"the start date of the async job"`
    _ bool `name:"listAsyncJobs" description:"Lists all pending asynchronous jobs for the account."`
}

// ListAsyncJobsResponse represents a list of job results
type ListAsyncJobsResponse struct {
    Count int `json:"count"`
    AsyncJobs []AsyncJobResult `json:"asyncjobs"`
}

func (ListAsyncJobs) response() interface{} {
    return new(ListAsyncJobsResponse)
    Count int `json:"count"`
    AsyncJob []AsyncJobResult `json:"asyncjobs"`
}

// Result unmarshals the result of an AsyncJobResult into the given interface
43 vendor/github.com/exoscale/egoscale/asyncjobs_response.go generated vendored Normal file
@@ -0,0 +1,43 @@
// code generated; DO NOT EDIT.

package egoscale

import "fmt"

// Response returns the struct to unmarshal
func (ListAsyncJobs) Response() interface{} {
    return new(ListAsyncJobsResponse)
}

// ListRequest returns itself
func (ls *ListAsyncJobs) ListRequest() (ListCommand, error) {
    if ls == nil {
        return nil, fmt.Errorf("%T cannot be nil", ls)
    }
    return ls, nil
}

// SetPage sets the current page
func (ls *ListAsyncJobs) SetPage(page int) {
    ls.Page = page
}

// SetPageSize sets the page size
func (ls *ListAsyncJobs) SetPageSize(pageSize int) {
    ls.PageSize = pageSize
}

// Each triggers the callback for each valid answer or any non-404 issue
func (ListAsyncJobs) Each(resp interface{}, callback IterateItemFunc) {
    items, ok := resp.(*ListAsyncJobsResponse)
    if !ok {
        callback(nil, fmt.Errorf("wrong type, ListAsyncJobsResponse was expected, got %T", resp))
        return
    }

    for i := range items.AsyncJob {
        if !callback(&items.AsyncJob[i], nil) {
            break
        }
    }
}
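The exported Each above is what PaginateWithContext (see the client.go hunks below) invokes page by page; returning false from the callback stops the iteration. A rough usage sketch over ListAsyncJobs with placeholder credentials, stopping after a handful of results.

```go
package main

import (
    "fmt"
    "log"

    "github.com/exoscale/egoscale"
)

func main() {
    // Placeholder endpoint and credentials.
    client := egoscale.NewClient("https://api.example.com/compute", "API_KEY", "API_SECRET")

    seen := 0
    // Paginate drives SetPage/SetPageSize/Each; the callback decides whether
    // to keep iterating.
    client.Paginate(&egoscale.ListAsyncJobs{}, func(item interface{}, err error) bool {
        if err != nil {
            log.Println(err)
            return false
        }
        if job, ok := item.(*egoscale.AsyncJobResult); ok {
            fmt.Println(job.Cmd)
        }
        seen++
        return seen < 5 // stop early after five jobs
    })
}
```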
117 vendor/github.com/exoscale/egoscale/client.go generated vendored
@@ -13,11 +13,11 @@ import (
    "time"
)

// Taggable represents a resource which can have tags attached
// Taggable represents a resource to which tags can be attached
//
// This is a helper to fill the resourcetype of a CreateTags call
type Taggable interface {
    // CloudStack resource type of the Taggable type
    // ResourceType is the name of the Taggable type
    ResourceType() string
}

@@ -33,16 +33,11 @@ type Listable interface {
    ListRequest() (ListCommand, error)
}

// Gettable represents an Interface that can be "Get" by the client
type Gettable interface {
    Listable
}

// Client represents the CloudStack API client
// Client represents the API client
type Client struct {
    // HTTPClient holds the HTTP client
    HTTPClient *http.Client
    // Endpoints is CloudStack API
    // Endpoint is the HTTP URL
    Endpoint string
    // APIKey is the API identifier
    APIKey string
@@ -52,13 +47,15 @@ type Client struct {
    PageSize int
    // Timeout represents the default timeout for the async requests
    Timeout time.Duration
    // Expiration represents how long a signed payload may be used
    Expiration time.Duration
    // RetryStrategy represents the waiting strategy for polling the async requests
    RetryStrategy RetryStrategyFunc
    // Logger contains any log, plug your own
    Logger *log.Logger
}

// RetryStrategyFunc represents how much time to wait between two calls to CloudStack
// RetryStrategyFunc represents how much time to wait between two calls to the API
type RetryStrategyFunc func(int64) time.Duration

// IterateItemFunc represents the callback to iterate a list of results, if false stops
@@ -67,11 +64,12 @@ type IterateItemFunc func(interface{}, error) bool
// WaitAsyncJobResultFunc represents the callback to wait for the results of an async request, if false stops
type WaitAsyncJobResultFunc func(*AsyncJobResult, error) bool

// NewClient creates a CloudStack API client with default timeout (60)
// NewClient creates an API client with default timeout (60)
//
// Timeout is set to both the HTTP client and the client itself.
func NewClient(endpoint, apiKey, apiSecret string) *Client {
    timeout := 60 * time.Second
    expiration := 10 * time.Minute

    httpClient := &http.Client{
        Transport: http.DefaultTransport,
@@ -84,6 +82,7 @@ func NewClient(endpoint, apiKey, apiSecret string) *Client {
        apiSecret: apiSecret,
        PageSize: 50,
        Timeout: timeout,
        Expiration: expiration,
        RetryStrategy: MonotonicRetryStrategyFunc(2),
        Logger: log.New(ioutil.Discard, "", 0),
    }
@@ -97,45 +96,52 @@ func NewClient(endpoint, apiKey, apiSecret string) *Client {
}

// Get populates the given resource or fails
func (client *Client) Get(g Gettable) error {
func (client *Client) Get(ls Listable) (interface{}, error) {
    ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
    defer cancel()

    return client.GetWithContext(ctx, g)
    return client.GetWithContext(ctx, ls)
}

// GetWithContext populates the given resource or fails
func (client *Client) GetWithContext(ctx context.Context, g Gettable) error {
    gs, err := client.ListWithContext(ctx, g)
func (client *Client) GetWithContext(ctx context.Context, ls Listable) (interface{}, error) {
    gs, err := client.ListWithContext(ctx, ls)
    if err != nil {
        return err
        return nil, err
    }

    count := len(gs)
    if count != 1 {
        req, err := g.ListRequest()
        req, err := ls.ListRequest()
        if err != nil {
            return err
            return nil, err
        }
        params, err := client.Payload(req)
        if err != nil {
            return err
            return nil, err
        }

        // removing sensitive/useless information
        params.Del("expires")
        params.Del("response")
        params.Del("signature")
        params.Del("signatureversion")

        // formatting the query string nicely
        payload := params.Encode()
        payload = strings.Replace(payload, "&", ", ", -1)

        if count == 0 {
            return &ErrorResponse{
                ErrorCode: ParamError,
                ErrorText: fmt.Sprintf("not found, query: %s", payload),
            return nil, &ErrorResponse{
                CSErrorCode: ServerAPIException,
                ErrorCode: ParamError,
                ErrorText: fmt.Sprintf("not found, query: %s", payload),
            }
        }
        return fmt.Errorf("more than one element found: %s", payload)
        return nil, fmt.Errorf("more than one element found: %s", payload)
    }

    return Copy(g, gs[0])
    return gs[0], nil
}

// Delete removes the given resource or fails
@@ -160,18 +166,25 @@ func (client *Client) List(g Listable) ([]interface{}, error) {
}

// ListWithContext lists the given resources (and paginates till the end)
func (client *Client) ListWithContext(ctx context.Context, g Listable) ([]interface{}, error) {
    s := make([]interface{}, 0)
func (client *Client) ListWithContext(ctx context.Context, g Listable) (s []interface{}, err error) {
    s = make([]interface{}, 0)

    if g == nil || reflect.ValueOf(g).IsNil() {
        return s, fmt.Errorf("g Listable shouldn't be nil, got %#v", g)
    defer func() {
        if e := recover(); e != nil {
            if g == nil || reflect.ValueOf(g).IsNil() {
                err = fmt.Errorf("g Listable shouldn't be nil, got %#v", g)
                return
            }

            panic(e)
        }
    }()

    req, e := g.ListRequest()
    if e != nil {
        err = e
        return
    }

    req, err := g.ListRequest()
    if err != nil {
        return s, err
    }

    client.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
        if item != nil {
            s = append(s, item)
@@ -181,7 +194,7 @@ func (client *Client) ListWithContext(ctx context.Context, g Listable) ([]interf
        return false
    })

    return s, err
    return
}

// AsyncListWithContext lists the given resources (and paginates till the end)
@@ -227,7 +240,6 @@ func (client *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-c
            errChan <- err
            return
        }

        client.PaginateWithContext(ctx, req, func(item interface{}, e error) bool {
            if item != nil {
                outChan <- item
@@ -242,15 +254,21 @@ func (client *Client) AsyncListWithContext(ctx context.Context, g Listable) (<-c
}

// Paginate runs the ListCommand and paginates
func (client *Client) Paginate(req ListCommand, callback IterateItemFunc) {
func (client *Client) Paginate(g Listable, callback IterateItemFunc) {
    ctx, cancel := context.WithTimeout(context.Background(), client.Timeout)
    defer cancel()

    client.PaginateWithContext(ctx, req, callback)
    client.PaginateWithContext(ctx, g, callback)
}

// PaginateWithContext runs the ListCommand as long as the ctx is valid
func (client *Client) PaginateWithContext(ctx context.Context, req ListCommand, callback IterateItemFunc) {
func (client *Client) PaginateWithContext(ctx context.Context, g Listable, callback IterateItemFunc) {
    req, err := g.ListRequest()
    if err != nil {
        callback(nil, err)
        return
    }

    pageSize := client.PageSize

    page := 1
@@ -260,13 +278,18 @@ func (client *Client) PaginateWithContext(ctx context.Context, req ListCommand,
        req.SetPageSize(pageSize)
        resp, err := client.RequestWithContext(ctx, req)
        if err != nil {
            // in case of 431, the response is knowingly empty
            if errResponse, ok := err.(*ErrorResponse); ok && page == 1 && errResponse.ErrorCode == ParamError {
                break
            }

            callback(nil, err)
            break
        }

        size := 0
        didErr := false
        req.each(resp, func(element interface{}, err error) bool {
        req.Each(resp, func(element interface{}, err error) bool {
            // If the context was cancelled, kill it in flight
            if e := ctx.Err(); e != nil {
                element = nil
@@ -290,10 +313,12 @@ func (client *Client) PaginateWithContext(ctx context.Context, req ListCommand,
    }
}

// APIName returns the CloudStack name of the given command
// APIName returns the name of the given command
func (client *Client) APIName(command Command) string {
    // This is due to a limitation of Go<=1.7
    if _, ok := command.(*AuthorizeSecurityGroupEgress); ok {
    _, ok := command.(*AuthorizeSecurityGroupEgress)
    _, okPtr := command.(AuthorizeSecurityGroupEgress)
    if ok || okPtr {
        return "authorizeSecurityGroupEgress"
    }

@@ -304,7 +329,7 @@ func (client *Client) APIName(command Command) string {
    return info.Name
}

// APIDescription returns the description of the given CloudStack command
// APIDescription returns the description of the given command
func (client *Client) APIDescription(command Command) string {
    info, err := info(command)
    if err != nil {
@@ -315,11 +340,11 @@ func (client *Client) APIDescription(command Command) string {

// Response returns the response structure of the given command
func (client *Client) Response(command Command) interface{} {
    switch command.(type) {
    switch c := command.(type) {
    case AsyncCommand:
        return (command.(AsyncCommand)).asyncResponse()
        return c.AsyncResponse()
    default:
        return command.response()
        return command.Response()
    }
}
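Timeout, Expiration and RetryStrategy are plain fields on Client, so callers can tune them after NewClient; the defaults set above are 60 seconds, 10 minutes and MonotonicRetryStrategyFunc(2). An illustrative sketch with arbitrary values; the assumption that MonotonicRetryStrategyFunc takes its interval in seconds is flagged in the comment.

```go
package main

import (
    "time"

    "github.com/exoscale/egoscale"
)

func main() {
    // Placeholder endpoint and credentials.
    client := egoscale.NewClient("https://api.example.com/compute", "API_KEY", "API_SECRET")

    // Allow slow async jobs more time to finish and poll them at a constant
    // interval. Assumption: MonotonicRetryStrategyFunc takes seconds.
    client.Timeout = 5 * time.Minute
    client.RetryStrategy = egoscale.MonotonicRetryStrategyFunc(5)
}
```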
37 vendor/github.com/exoscale/egoscale/copier.go generated vendored
@@ -1,37 +0,0 @@
package egoscale

import (
    "fmt"
    "reflect"
)

// Copy copies the value from from into to. The type of "from" must be convertible into the type of "to".
func Copy(to, from interface{}) error {
    tt := reflect.TypeOf(to)
    tv := reflect.ValueOf(to)

    ft := reflect.TypeOf(from)
    fv := reflect.ValueOf(from)

    if tt.Kind() != reflect.Ptr {
        return fmt.Errorf("must copy to a pointer, got %q", tt.Name())
    }

    tt = tt.Elem()
    tv = tv.Elem()

    for {
        if ft.ConvertibleTo(tt) {
            break
        }
        if ft.Kind() == reflect.Ptr {
            ft = ft.Elem()
            fv = fv.Elem()
        } else {
            return fmt.Errorf("cannot convert %q into %q", tt.Name(), ft.Name())
        }
    }

    tv.Set(fv.Convert(tt))
    return nil
}
Some files were not shown because too many files have changed in this diff.