forked from SW/traefik
Compare commits
62 Commits
SHA1 of the 62 compared commits:
0b2a6cecd1, fee89273a3, 463e2285d6, c65277a6f0, bb14c3e812, 85580dde3c, 4bb6d1bf4f, 459cabca8e,
dcca37fe29, a610f0b2a1, ded285be29, 4b31d3306b, 5a6652fc20, 91b0e2e4dc, 554bceb75b, ce76212605,
d474c87cde, 862772230c, 09fa1e6546, c3125104d9, 31786f7163, d7ed42d614, 4c709c2fcd, 8dac076711,
0b039499c6, 2124975432, e4725619ea, d24f78222c, 3f1925e20e, a8393faf0a, 4b8ece5b42, 7574bb9226,
f68b629469, f9e9e11035, 772c9ca4d5, 208d0fa471, 9f72b6d1d5, 29ef007917, 590a0d67bb, 74ad83f05a,
d707c8ba93, 640eb62ca1, 216710864e, 226f20b626, 151be83bce, d1a8c7fa78, 254dc38c3d, 24d084d7e6,
df5f530058, 753d173965, ffd1f122de, f98b57fdf4, 2d37f08864, a7dbcc282c, f4f62e7fb3, 4cae8bcb10,
bee370ec6b, f1d016b893, 4defbbe848, f397342f16, 989a59cc29, c5b71592c8
20  .semaphoreci/golang.sh  (Executable file)

@@ -0,0 +1,20 @@
#!/usr/bin/env bash

set -e

curl -O https://dl.google.com/go/go1.14.linux-amd64.tar.gz

tar -xvf go1.14.linux-amd64.tar.gz
rm -rf go1.14.linux-amd64.tar.gz

sudo mkdir -p /usr/local/golang/1.14/go
sudo mv go /usr/local/golang/1.14/

sudo rm /usr/local/bin/go
sudo chmod +x /usr/local/golang/1.14/go/bin/go
sudo ln -s /usr/local/golang/1.14/go/bin/go /usr/local/bin/go

export GOROOT="/usr/local/golang/1.14/go"
export GOTOOLDIR="/usr/local/golang/1.14/go/pkg/tool/linux_amd64"

go version
@@ -13,6 +13,7 @@ env:
- VERSION: $TRAVIS_TAG
- CODENAME: maroilles
- N_MAKE_JOBS: 2
- DOCS_VERIFY_SKIP: true

script:
- echo "Skipping tests... (Tests are executed on SemaphoreCI)"
122  CHANGELOG.md

@@ -1,5 +1,127 @@
# Change Log

## [v1.7.25](https://github.com/containous/traefik/tree/v1.7.25) (2020-07-15)
[All Commits](https://github.com/containous/traefik/compare/v1.7.24...v1.7.25)

**Bug fixes:**
- **[middleware]** Fix ipv6 handling in redirect middleware ([#6901](https://github.com/containous/traefik/pull/6901) by [rtribotte](https://github.com/rtribotte))
- **[provider]** Backport "Same Configuration Check" from master ([#6631](https://github.com/containous/traefik/pull/6631) by [rkojedzinszky](https://github.com/rkojedzinszky))

**Documentation:**
- **[k8s]** Add ingress setup for minikube in docs ([#6552](https://github.com/containous/traefik/pull/6552) by [kobayashi](https://github.com/kobayashi))
- Doc improvement on entrypoints whitelist for v1.7 ([#6571](https://github.com/containous/traefik/pull/6571) by [ddtmachado](https://github.com/ddtmachado))

## [v1.7.24](https://github.com/containous/traefik/tree/v1.7.24) (2020-03-25)
[All Commits](https://github.com/containous/traefik/compare/v1.7.23...v1.7.24)

**Bug fixes:**
- **[k8s/ingress]** fix: stickiness annotations support. ([#6576](https://github.com/containous/traefik/pull/6576) by [ldez](https://github.com/ldez))

## [v1.7.23](https://github.com/containous/traefik/tree/v1.7.23) (2020-03-23)
[All Commits](https://github.com/containous/traefik/compare/v1.7.22...v1.7.23)

**Bug fixes:**
- **[consul,consulcatalog,docker,ecs,k8s,marathon,mesos,rancher,sticky-session]** Fix sameSite ([#6537](https://github.com/containous/traefik/pull/6537) by [ldez](https://github.com/ldez))
- **[server]** Force http/1.1 for upgrade ([#6553](https://github.com/containous/traefik/pull/6553) by [juliens](https://github.com/juliens))
- **[tls]** fix: max TLS version. ([#6531](https://github.com/containous/traefik/pull/6531) by [ldez](https://github.com/ldez))

## [v1.7.22](https://github.com/containous/traefik/tree/v1.7.22) (2020-03-09)
[All Commits](https://github.com/containous/traefik/compare/v1.7.21...v1.7.22)

**Bug fixes:**
- **[provider]** Skip redirection with invalid regex syntax. ([#6446](https://github.com/containous/traefik/pull/6446) by [ldez](https://github.com/ldez))
- **[server]** Clear closed hijacked h2c connections ([#6357](https://github.com/containous/traefik/pull/6357) by [omerkay](https://github.com/omerkay))

**Documentation:**
- **[acme]** fix: manual provider code name. ([#6456](https://github.com/containous/traefik/pull/6456) by [ldez](https://github.com/ldez))
- **[docker]** Fix typo in user guide. ([#6386](https://github.com/containous/traefik/pull/6386) by [ldez](https://github.com/ldez))
- **[k8s]** Complete TLS example for Kubernetes Ingress in user guide ([#6457](https://github.com/containous/traefik/pull/6457) by [rtribotte](https://github.com/rtribotte))
- **[k8s]** Updated rbac and Daemonset example bloc ([#6375](https://github.com/containous/traefik/pull/6375) by [bjthomas1](https://github.com/bjthomas1))

## [v1.7.21](https://github.com/containous/traefik/tree/v1.7.21) (2020-02-20)
[All Commits](https://github.com/containous/traefik/compare/v1.7.20...v1.7.21)

**Bug fixes:**
- **[acme]** Fix dnspod update. ([#6240](https://github.com/containous/traefik/pull/6240) by [iineva](https://github.com/iineva))
- **[acme]** Fix finding proper provided certificate when ACME is enabled ([#5873](https://github.com/containous/traefik/pull/5873) by [yazd](https://github.com/yazd))
- **[authentication,middleware]** don't create http client in each request in forward auth ([#6273](https://github.com/containous/traefik/pull/6273) by [juliens](https://github.com/juliens))
- **[ecs]** fix: skip ECS container when information are missing instead of panic. ([#6071](https://github.com/containous/traefik/pull/6071) by [ldez](https://github.com/ldez))
- **[k8s]** Add edge case for root path with rewrite-target ([#6005](https://github.com/containous/traefik/pull/6005) by [dtomcej](https://github.com/dtomcej))

**Documentation:**
- **[k8s,k8s/ingress]** Update the k8s api version in the documentation ([#6162](https://github.com/containous/traefik/pull/6162) by [jbdoumenjou](https://github.com/jbdoumenjou))
- **[middleware]** Improve rate-limiting doc ([#6277](https://github.com/containous/traefik/pull/6277) by [mpl](https://github.com/mpl))
- **[provider]** Fix sample for ssl-header in docs ([#6337](https://github.com/containous/traefik/pull/6337) by [pierresteiner](https://github.com/pierresteiner))

## [v1.7.20](https://github.com/containous/traefik/tree/v1.7.20) (2019-12-09)
[All Commits](https://github.com/containous/traefik/compare/v1.7.19...v1.7.20)

**Bug fixes:**
- **[acme]** Truncate key for identification in log ([#5941](https://github.com/containous/traefik/pull/5941) by [dtomcej](https://github.com/dtomcej))
- **[middleware]** fix: location header rewrite. ([#5857](https://github.com/containous/traefik/pull/5857) by [ldez](https://github.com/ldez))

**Documentation:**
- Add a warning note regarding optional TLS mutual auth ([#5434](https://github.com/containous/traefik/pull/5434) by [bradjones1](https://github.com/bradjones1))

## [v1.7.19](https://github.com/containous/traefik/tree/v1.7.19) (2019-10-25)
[All Commits](https://github.com/containous/traefik/compare/v1.7.18...v1.7.19)

**Bug fixes:**
- **[k8s,k8s/ingress]** Add functions to support precise float compute of weight ([#5663](https://github.com/containous/traefik/pull/5663) by [rmrfself](https://github.com/rmrfself))
- **[middleware]** Fix Location response header http to https when SSL ([#5574](https://github.com/containous/traefik/pull/5574) by [elielgoncalves](https://github.com/elielgoncalves))
- **[tls]** Allow Default Certificate to work on macOS 10.15 ([#5662](https://github.com/containous/traefik/pull/5662) by [dtomcej](https://github.com/dtomcej))

**Documentation:**
- **[k8s]** Update DaemonSet apiVersion ([#5682](https://github.com/containous/traefik/pull/5682) by [ialidzhikov](https://github.com/ialidzhikov))

## [v1.7.18](https://github.com/containous/traefik/tree/v1.7.18) (2019-09-23)
[All Commits](https://github.com/containous/traefik/compare/v1.7.17...v1.7.18)

**Bug fixes:**
- **[go,security]** This version is compiled with [Go 1.12.10](https://groups.google.com/d/msg/golang-announce/cszieYyuL9Q/g4Z7pKaqAgAJ), which fixes a vulnerability in previous versions. See the [CVE](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-16276) about it for more details.

## [v1.7.17](https://github.com/containous/traefik/tree/v1.7.17) (2019-09-23)
[All Commits](https://github.com/containous/traefik/compare/v1.7.16...v1.7.17)

**Bug fixes:**
- **[logs,middleware]** Avoid closing stdout when the accesslog handler is closed ([#5459](https://github.com/containous/traefik/pull/5459) by [nrwiersma](https://github.com/nrwiersma))
- **[middleware]** Actually send header and code during WriteHeader, if needed ([#5404](https://github.com/containous/traefik/pull/5404) by [mpl](https://github.com/mpl))

**Documentation:**
- **[k8s]** Add note clarifying client certificate header ([#5362](https://github.com/containous/traefik/pull/5362) by [bradjones1](https://github.com/bradjones1))
- **[webui]** Update docs links. ([#5412](https://github.com/containous/traefik/pull/5412) by [ldez](https://github.com/ldez))
- Update Traefik image version. ([#5399](https://github.com/containous/traefik/pull/5399) by [ldez](https://github.com/ldez))

## [v1.7.16](https://github.com/containous/traefik/tree/v1.7.16) (2019-09-13)
[All Commits](https://github.com/containous/traefik/compare/v1.7.15...v1.7.16)

**Bug fixes:**
- **[middleware,websocket]** implement Flusher and Hijacker for codeCatcher ([#5376](https://github.com/containous/traefik/pull/5376) by [mpl](https://github.com/mpl))

## [v1.7.15](https://github.com/containous/traefik/tree/v1.7.15) (2019-09-12)
[All Commits](https://github.com/containous/traefik/compare/v1.7.14...v1.7.15)

**Bug fixes:**
- **[authentication,k8s/ingress]** Kubernetes support for Auth.HeaderField ([#5235](https://github.com/containous/traefik/pull/5235) by [ErikWegner](https://github.com/ErikWegner))
- **[k8s,k8s/ingress]** Finish kubernetes throttling refactoring ([#5269](https://github.com/containous/traefik/pull/5269) by [mpl](https://github.com/mpl))
- **[k8s]** Throttle Kubernetes config refresh ([#4716](https://github.com/containous/traefik/pull/4716) by [benweissmann](https://github.com/benweissmann))
- **[k8s]** Fix wrong handling of insecure tls auth forward ingress annotation ([#5319](https://github.com/containous/traefik/pull/5319) by [majkrzak](https://github.com/majkrzak))
- **[middleware]** error pages: do not buffer response when it's not an error ([#5285](https://github.com/containous/traefik/pull/5285) by [mpl](https://github.com/mpl))
- **[tls]** Consider default cert domain in certificate store ([#5353](https://github.com/containous/traefik/pull/5353) by [nrwiersma](https://github.com/nrwiersma))
- **[tls]** Add TLS minversion constraint ([#5356](https://github.com/containous/traefik/pull/5356) by [dtomcej](https://github.com/dtomcej))

**Documentation:**
- **[acme]** Update Acme doc - Vultr Wildcard & Root ([#5320](https://github.com/containous/traefik/pull/5320) by [ddymko](https://github.com/ddymko))
- **[consulcatalog]** Typo in basic auth usersFile label consul-catalog ([#5230](https://github.com/containous/traefik/pull/5230) by [pitan](https://github.com/pitan))
- **[logs]** Improve Access Logs Documentation page ([#5238](https://github.com/containous/traefik/pull/5238) by [dduportal](https://github.com/dduportal))

## [v1.7.14](https://github.com/containous/traefik/tree/v1.7.14) (2019-08-14)
[All Commits](https://github.com/containous/traefik/compare/v1.7.13...v1.7.14)

**Bug fixes:**
- Update to go1.12.8 ([#5201](https://github.com/containous/traefik/pull/5201) by [ldez](https://github.com/ldez)). HTTP/2 Denial of Service [CVE-2019-9512](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9512) and [CVE-2019-9514](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9514)
- **[server]** Make hijackConnectionTracker.Close thread safe ([#5194](https://github.com/containous/traefik/pull/5194) by [jlevesy](https://github.com/jlevesy))

## [v1.7.13](https://github.com/containous/traefik/tree/v1.7.13) (2019-08-07)
[All Commits](https://github.com/containous/traefik/compare/v1.7.12...v1.7.13)
@@ -13,7 +13,7 @@ You need to run the `binary` target. This will create binaries for Linux platfor
$ make binary
docker build -t "traefik-dev:no-more-godep-ever" -f build.Dockerfile .
Sending build context to Docker daemon 295.3 MB
Step 0 : FROM golang:1.11-alpine
Step 0 : FROM golang:1.14-alpine
---> 8c6473912976
Step 1 : RUN go get github.com/golang/dep/cmd/dep
[...]

@@ -158,7 +158,7 @@ Integration tests must be run from the `integration/` directory and require the

## Documentation

The [documentation site](https://docs.traefik.io/) is built with [mkdocs](https://mkdocs.org/)
The [documentation site](https://docs.traefik.io/v1.7/) is built with [mkdocs](https://mkdocs.org/)

### Building Documentation
25  Gopkg.lock  (generated)

@@ -477,12 +477,11 @@
version = "v1.1.0"

[[projects]]
digest = "1:fa62421bd924623ac10a160686cc55d529f7274b2caedf7d2c607d14bc50c118"
digest = "1:295f7aed734ab28daff7c0df7db42bcccf7a043d4e0a5daf5768129346b25f23"
name = "github.com/decker502/dnspod-go"
packages = ["."]
pruneopts = "NUT"
revision = "71fbbdbdf1a7eeac949586de15bf96d416d3dd63"
version = "v0.2.0"
revision = "071171b22a9b65e4f544b61c143befd54a08a64e"

[[projects]]
digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2"

@@ -1770,6 +1769,13 @@
pruneopts = "NUT"
revision = "1d7be4effb13d2d908342d349d71a284a7542693"

[[projects]]
digest = "1:9ca27be3cfd8a452f9814926a5842c6917289da8c21174ee0f9c79d84850b2ed"
name = "github.com/shopspring/decimal"
packages = ["."]
pruneopts = "NUT"
revision = "f1972eb1d1f519e2e5f4b51f2dea765e8c93a130"

[[projects]]
digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
name = "github.com/sirupsen/logrus"

@@ -1908,12 +1914,12 @@
revision = "50716a0a853771bb36bfce61a45cdefdb98c2e6e"

[[projects]]
branch = "v1"
digest = "1:60888cead16f066c948c078258b27f2885dce91cb6aadacf545b62a1ae1d08cb"
digest = "1:649756d307b6d8ddb369d1cca0465b679aa7d1a956ddfa8eb18f8072a1a2b7a4"
name = "github.com/unrolled/secure"
packages = ["."]
pruneopts = "NUT"
revision = "a1cf62cc2159fff407728f118c41aece76c397fa"
revision = "996bc0cd7e5be6e6a1c5f34b0259bc47c8bcfbc9"
version = "v1.0.5"

[[projects]]
digest = "1:e84e99d5f369afaa9a5c41f55b57fa03047ecd3bac2a65861607882693ceea81"

@@ -1931,8 +1937,7 @@
version = "v0.1.0"

[[projects]]
branch = "master"
digest = "1:86f14aadf288fe3ad8ac060bcb2b5083cec3829dd883803486ec834d031060c9"
digest = "1:a31155114d2977ef48d2cca6810c4e055545d676bf13b604c2debd42de44cad0"
name = "github.com/vulcand/oxy"
packages = [
"buffer",

@@ -1945,7 +1950,8 @@
"utils",
]
pruneopts = "NUT"
revision = "0d102f45103cf49a95b5c6e810e092973cbcb68c"
revision = "9dbd22c030f2187f590fea61ebe84ab78a8146a2"
version = "v1.1.0"

[[projects]]
digest = "1:ca6bac407fedc14fbeeba861dd33a821ba3a1624c10126ec6003b0a28d4139c5"

@@ -2612,6 +2618,7 @@
"github.com/rancher/go-rancher-metadata/metadata",
"github.com/rancher/go-rancher/v2",
"github.com/ryanuber/go-glob",
"github.com/shopspring/decimal",
"github.com/sirupsen/logrus",
"github.com/stretchr/testify/assert",
"github.com/stretchr/testify/mock",
16  Gopkg.toml

@@ -167,15 +167,15 @@
version = "1.1.0"

[[constraint]]
branch = "v1"
name = "github.com/unrolled/secure"
version = "1.0.5"

[[constraint]]
name = "github.com/vdemeester/shakers"
version = "0.1.0"

[[constraint]]
branch = "master"
version = "v1.1.0"
name = "github.com/vulcand/oxy"

[[constraint]]

@@ -264,3 +264,15 @@
[[constraint]]
name = "github.com/google/uuid"
version = "0.2.0"

[[constraint]]
name = "github.com/shopspring/decimal"
revision = "f1972eb1d1f519e2e5f4b51f2dea765e8c93a130"

[[override]]
name = "contrib.go.opencensus.io/exporter/ocagent"
version = "0.4.12"

[[override]]
name = "github.com/decker502/dnspod-go"
revision = "071171b22a9b65e4f544b61c143befd54a08a64e"
2  Makefile

@@ -76,7 +76,7 @@ test-integration: build ## run the integration tests
TEST_HOST=1 ./script/make.sh test-integration

validate: build ## validate code, vendor and autogen
$(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt validate-govet validate-golint validate-misspell validate-vendor validate-autogen
$(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt validate-golint validate-misspell validate-vendor validate-autogen

build: dist
docker build $(DOCKER_BUILD_ARGS) -t "$(TRAEFIK_DEV_IMAGE)" -f build.Dockerfile .
30  README.md

@@ -4,7 +4,7 @@
</p>

[](https://semaphoreci.com/containous/traefik)
[](https://docs.traefik.io)
[](https://docs.traefik.io/v1.7)
[](http://goreportcard.com/report/containous/traefik)
[](https://microbadger.com/images/traefik)
[](https://github.com/containous/traefik/blob/master/LICENSE.md)

@@ -70,22 +70,22 @@ _(But if you'd rather configure some of your routes manually, Traefik supports t

## Supported Backends

- [Docker](https://docs.traefik.io/configuration/backends/docker) / [Swarm mode](https://docs.traefik.io/configuration/backends/docker#docker-swarm-mode)
- [Kubernetes](https://docs.traefik.io/configuration/backends/kubernetes)
- [Mesos](https://docs.traefik.io/configuration/backends/mesos) / [Marathon](https://docs.traefik.io/configuration/backends/marathon)
- [Rancher](https://docs.traefik.io/configuration/backends/rancher) (API, Metadata)
- [Azure Service Fabric](https://docs.traefik.io/configuration/backends/servicefabric)
- [Consul Catalog](https://docs.traefik.io/configuration/backends/consulcatalog)
- [Consul](https://docs.traefik.io/configuration/backends/consul) / [Etcd](https://docs.traefik.io/configuration/backends/etcd) / [Zookeeper](https://docs.traefik.io/configuration/backends/zookeeper) / [BoltDB](https://docs.traefik.io/configuration/backends/boltdb)
- [Eureka](https://docs.traefik.io/configuration/backends/eureka)
- [Amazon ECS](https://docs.traefik.io/configuration/backends/ecs)
- [Amazon DynamoDB](https://docs.traefik.io/configuration/backends/dynamodb)
- [File](https://docs.traefik.io/configuration/backends/file)
- [Rest](https://docs.traefik.io/configuration/backends/rest)
- [Docker](https://docs.traefik.io/v1.7/configuration/backends/docker) / [Swarm mode](https://docs.traefik.io/v1.7/configuration/backends/docker#docker-swarm-mode)
- [Kubernetes](https://docs.traefik.io/v1.7/configuration/backends/kubernetes)
- [Mesos](https://docs.traefik.io/v1.7/configuration/backends/mesos) / [Marathon](https://docs.traefik.io/v1.7/configuration/backends/marathon)
- [Rancher](https://docs.traefik.io/v1.7/configuration/backends/rancher) (API, Metadata)
- [Azure Service Fabric](https://docs.traefik.io/v1.7/configuration/backends/servicefabric)
- [Consul Catalog](https://docs.traefik.io/v1.7/configuration/backends/consulcatalog)
- [Consul](https://docs.traefik.io/v1.7/configuration/backends/consul) / [Etcd](https://docs.traefik.io/v1.7/configuration/backends/etcd) / [Zookeeper](https://docs.traefik.io/v1.7/configuration/backends/zookeeper) / [BoltDB](https://docs.traefik.io/v1.7/configuration/backends/boltdb)
- [Eureka](https://docs.traefik.io/v1.7/configuration/backends/eureka)
- [Amazon ECS](https://docs.traefik.io/v1.7/configuration/backends/ecs)
- [Amazon DynamoDB](https://docs.traefik.io/v1.7/configuration/backends/dynamodb)
- [File](https://docs.traefik.io/v1.7/configuration/backends/file)
- [Rest](https://docs.traefik.io/v1.7/configuration/backends/rest)

## Quickstart

To get your hands on Traefik, you can use the [5-Minute Quickstart](http://docs.traefik.io/#the-traefik-quickstart-using-docker) in our documentation (you will need Docker).
To get your hands on Traefik, you can use the [5-Minute Quickstart](http://docs.traefik.io/v1.7/#the-traefik-quickstart-using-docker) in our documentation (you will need Docker).

Alternatively, if you don't want to install anything on your computer, you can try Traefik online in this great [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers.

@@ -100,7 +100,7 @@ You can access the simple HTML frontend of Traefik.

## Documentation

You can find the complete documentation at [https://docs.traefik.io](https://docs.traefik.io).
You can find the complete documentation at [https://docs.traefik.io/v1.7](https://docs.traefik.io/v1.7).
A collection of contributions around Traefik can be found at [https://awesome.traefik.io](https://awesome.traefik.io).

## Support
@@ -111,7 +111,12 @@ func (a *Account) GetPrivateKey() crypto.PrivateKey {
        return privateKey
    }

    log.Errorf("Cannot unmarshall private key %+v", a.PrivateKey)
    keySnippet := ""
    if len(a.PrivateKey) >= 16 {
        keySnippet = string(a.PrivateKey[:16])
    }

    log.Errorf("Cannot unmarshall private key beginning with %s", keySnippet)
    return nil
}
@@ -159,6 +159,9 @@ var _templatesConsul_catalogTmpl = []byte(`[backends]
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -654,6 +657,9 @@ var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}}
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -1000,6 +1006,9 @@ var _templatesEcsTmpl = []byte(`[backends]
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $serviceName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -1325,6 +1334,9 @@ var _templatesKubernetesTmpl = []byte(`[backends]
{{if $backend.LoadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $backend.LoadBalancer.Stickiness.CookieName }}"
secure = {{ $backend.LoadBalancer.Stickiness.Secure }}
httpOnly = {{ $backend.LoadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $backend.LoadBalancer.Stickiness.SameSite }}"
{{end}}

{{if $backend.MaxConn }}

@@ -1365,7 +1377,9 @@ var _templatesKubernetesTmpl = []byte(`[backends]
{{if $frontend.Auth }}
[frontends."{{ $frontendName }}".auth]
headerField = "X-WebAuth-User"
{{if $frontend.Auth.HeaderField }}
headerField = "{{ $frontend.Auth.HeaderField }}"
{{end}}

{{if $frontend.Auth.Basic }}
[frontends."{{ $frontendName }}".auth.basic]

@@ -1580,6 +1594,9 @@ var _templatesKvTmpl = []byte(`[backends]
{{if $loadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -1968,6 +1985,9 @@ var _templatesMarathonTmpl = []byte(`{{ $apps := .Applications }}
{{if $loadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -2300,6 +2320,9 @@ var _templatesMesosTmpl = []byte(`[backends]
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -2686,6 +2709,9 @@ var _templatesRancherTmpl = []byte(`{{ $backendServers := .Backends }}
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}
@@ -1,4 +1,4 @@
FROM golang:1.11-alpine
FROM golang:1.14-alpine

RUN apk --update upgrade \
    && apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar ca-certificates tzdata \

@@ -6,7 +6,6 @@ RUN apk --update upgrade \
    && rm -rf /var/cache/apk/*

RUN go get golang.org/x/lint/golint \
    && go get github.com/kisielk/errcheck \
    && go get github.com/client9/misspell/cmd/misspell

# Which docker version to test on
@@ -352,14 +352,14 @@ func stats(globalConfiguration *configuration.GlobalConfiguration) {
Stats collection is enabled.
Many thanks for contributing to Traefik's improvement by allowing us to receive anonymous information from your configuration.
Help us improve Traefik by leaving this feature on :)
More details on: https://docs.traefik.io/basics/#collected-data
More details on: https://docs.traefik.io/v1.7/basics/#collected-data
`)
        collect(globalConfiguration)
    } else {
        log.Info(`
Stats collection is disabled.
Help us improve Traefik by turning this feature on :)
More details on: https://docs.traefik.io/basics/#collected-data
More details on: https://docs.traefik.io/v1.7/basics/#collected-data
`)
    }
}
@@ -12,6 +12,7 @@ import (
    "github.com/containous/traefik/provider"
    acmeprovider "github.com/containous/traefik/provider/acme"
    "github.com/containous/traefik/provider/file"
    "github.com/containous/traefik/tls"
    "github.com/stretchr/testify/assert"
)

@@ -269,3 +270,67 @@ func TestInitACMEProvider(t *testing.T) {
        })
    }
}

func TestSetEffectiveConfigurationTLSMinVersion(t *testing.T) {
    testCases := []struct {
        desc     string
        provided EntryPoint
        expected EntryPoint
    }{
        {
            desc: "Entrypoint with no TLS",
            provided: EntryPoint{
                Address: ":80",
            },
            expected: EntryPoint{
                Address:          ":80",
                ForwardedHeaders: &ForwardedHeaders{Insecure: true},
            },
        },
        {
            desc: "Entrypoint with TLS Specifying MinVersion",
            provided: EntryPoint{
                Address: ":443",
                TLS: &tls.TLS{
                    MinVersion: "VersionTLS12",
                },
            },
            expected: EntryPoint{
                Address:          ":443",
                ForwardedHeaders: &ForwardedHeaders{Insecure: true},
                TLS: &tls.TLS{
                    MinVersion: "VersionTLS12",
                },
            },
        },
        {
            desc: "Entrypoint with TLS without Specifying MinVersion",
            provided: EntryPoint{
                Address: ":443",
                TLS:     &tls.TLS{},
            },
            expected: EntryPoint{
                Address:          ":443",
                ForwardedHeaders: &ForwardedHeaders{Insecure: true},
                TLS:              &tls.TLS{},
            },
        },
    }

    for _, test := range testCases {
        test := test
        t.Run(test.desc, func(t *testing.T) {
            t.Parallel()

            gc := &GlobalConfiguration{
                EntryPoints: map[string]*EntryPoint{
                    "foo": &test.provided,
                },
            }

            gc.SetEffectiveConfiguration(defaultConfigFile)

            assert.Equal(t, &test.expected, gc.EntryPoints["foo"])
        })
    }
}
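The `MinVersion` field exercised by this test corresponds to an entry point's TLS settings. A minimal sketch of what the TOML form can look like, assuming the v1.7 `minVersion` key (check your version's entrypoints documentation for the exact spelling):

```toml
[entryPoints]
  [entryPoints.https]
    address = ":443"
    [entryPoints.https.tls]
      minVersion = "VersionTLS12"
```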
@@ -1,6 +1,6 @@
[Unit]
Description=Traefik
Documentation=https://docs.traefik.io
Documentation=https://docs.traefik.io/v1.7
#After=network-online.target
#AssertFileIsExecutable=/usr/bin/traefik
#AssertPathExists=/etc/traefik/traefik.toml
@@ -452,6 +452,27 @@ If not, a new backend will be assigned.
# Default: a sha1 (6 chars)
#
# cookieName = "my_cookie"

# Customize secure option
#
# Optional
# Default: false
#
# secure = true

# Customize http only option
#
# Optional
# Default: false
#
# httpOnly = true

# Customize same site option.
# Can be: "none", "lax", "strict"
#
# Optional
#
# sameSite = "none"
```
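Putting the commented options above together, a backend's sticky-session block can look like the following sketch (values are illustrative; the same shape appears in the file-provider example further down in this diff):

```toml
[backends.backend1.loadBalancer.stickiness]
  cookieName = "my_cookie"
  secure = true
  httpOnly = true
  sameSite = "lax"
```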
The deprecated way:
@@ -313,7 +313,7 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet |
| [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet |
| [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet |
| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press <kbd>Enter</kbd>. | YES |
| manual | `manual` | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press <kbd>Enter</kbd>. | YES |
| [MyDNS.jp](https://www.mydns.jp/) | `mydnsjp` | `MYDNSJP_MASTER_ID`, `MYDNSJP_PASSWORD` | YES |
| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES |
| [Namesilo](https://www.namesilo.com/) | `namesilo` | `NAMESILO_API_KEY` | YES |

@@ -336,7 +336,7 @@ For example, `CF_API_EMAIL_FILE=/run/secrets/traefik_cf-api-email` could be used
| [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet |
| [Versio](https://www.versio.nl/domeinnamen) | `versio` | `VERSIO_USERNAME`, `VERSIO_PASSWORD` | YES |
| [Vscale](https://vscale.io/) | `vscale` | `VSCALE_API_TOKEN` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet |
| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | YES |
| [Zone.ee](https://www.zone.ee) | `zoneee` | `ZONEEE_API_USER`, `ZONEEE_API_KEY` | YES |

- (1): more information about the HTTP message format can be found [here](https://go-acme.github.io/lego/dns/httpreq/)
@@ -124,16 +124,19 @@ Additional settings can be defined using Consul Catalog tags.
| `<prefix>.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm. |
| `<prefix>.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions. |
| `<prefix>.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions. |
| `<prefix>.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `<prefix>.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `<prefix>.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `<prefix>.backend.loadbalancer.sticky=true` | Enables backend sticky sessions. (DEPRECATED) |
| `<prefix>.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |
| `<prefix>.backend.maxconn.extractorfunc=client.ip` | Sets the function to be used against the request to determine what to limit maximum connections to the backend by.<br>Must be used in conjunction with the above label to take effect. |
| `<prefix>.frontend.auth.basic=EXPR` | Sets basic authentication to this frontend in CSV format: `User:Hash,User:Hash` (DEPRECATED). |
| `<prefix>.frontend.auth.basic.removeHeader=true` | If set to `true`, removes the `Authorization` header. |
| `<prefix>.frontend.auth.basic.users=EXPR` | Sets basic authentication to this frontend in CSV format: `User:Hash,User:Hash`. |
| `<prefix>.frontend.auth.basic.usersfile=/path/.htpasswd` | Sets basic authentication with an external file; if users and usersFile are provided, both are merged, with external file contents having precedence. |
| `<prefix>.frontend.auth.basic.usersFile=/path/.htpasswd` | Sets basic authentication with an external file; if users and usersFile are provided, both are merged, with external file contents having precedence. |
| `<prefix>.frontend.auth.digest.removeHeader=true` | If set to `true`, removes the `Authorization` header. |
| `<prefix>.frontend.auth.digest.users=EXPR` | Sets digest authentication to this frontend in CSV format: `User:Realm:Hash,User:Realm:Hash`. |
| `<prefix>.frontend.auth.digest.usersfile=/path/.htdigest` | Sets digest authentication with an external file; if users and usersFile are provided, both are merged, with external file contents having precedence. |
| `<prefix>.frontend.auth.digest.usersFile=/path/.htdigest` | Sets digest authentication with an external file; if users and usersFile are provided, both are merged, with external file contents having precedence. |
| `<prefix>.frontend.auth.forward.address=https://example.com` | Sets the URL of the authentication server. |
| `<prefix>.frontend.auth.forward.authResponseHeaders=EXPR` | Sets the forward authentication authResponseHeaders in CSV format: `X-Auth-User,X-Auth-Header` |
| `<prefix>.frontend.auth.forward.tls.ca=/path/ca.pem` | Sets the Certificate Authority (CA) for the TLS connection with the authentication server. |

@@ -224,7 +227,7 @@ If you need to support multiple frontends for a service, for example when having
| `<prefix>.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `<prefix>.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `<prefix>.frontend.headers.SSLForceHost=true` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `<prefix>.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `<prefix>.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `<prefix>.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `<prefix>.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `<prefix>.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -313,6 +313,9 @@ Labels can be used on containers to override default behavior.
| `traefik.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions |
| `traefik.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `traefik.backend.loadbalancer.sticky=true` | Enables backend sticky sessions (DEPRECATED) |
| `traefik.backend.loadbalancer.swarm=true` | Uses Swarm's inbuilt load balancer (only relevant under Swarm Mode) [3]. |
| `traefik.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |

@@ -412,7 +415,7 @@ It also means that Traefik will manipulate only one backend, not one backend per
| `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `traefik.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `traefik.frontend.headers.SSLForceHost=true` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `traefik.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `traefik.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -160,6 +160,9 @@ Labels can be used on task containers to override default behaviour:
| `traefik.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions |
| `traefik.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `traefik.backend.loadbalancer.sticky=true` | Enables backend sticky sessions (DEPRECATED) |
| `traefik.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Sets the function to be used against the request to determine what to limit maximum connections to the backend by.<br>Must be used in conjunction with the above label to take effect. |
@@ -31,6 +31,9 @@ Traefik can be configured with a file.
method = "drr"
[backends.backend1.loadBalancer.stickiness]
cookieName = "foobar"
secure = true
httpOnly = true
sameSite = "foobar"

[backends.backend1.maxConn]
amount = 10
@@ -73,6 +73,14 @@ See also [Kubernetes user guide](/user-guide/kubernetes).
#
# enablePassTLSCert = true

# Throttle how frequently we refresh our configuration from Ingresses when there
# are frequent changes.
#
# Optional
# Default: 0 (no throttling)
#
# throttleDuration = 10s

# Override default configuration template.
#
# Optional
@@ -210,10 +218,14 @@ infos:
serialnumber: true
```

If `pem` is set, it will add a `X-Forwarded-Tls-Client-Cert` header that contains the escaped pem as value.
If at least one flag of the `infos` part is set, it will add a `X-Forwarded-Tls-Client-Cert-Infos` header that contains an escaped string composed of the client certificate data selected by the infos flags.
This infos part is composed like the following example (not escaped):
```Subject="C=FR,ST=SomeState,L=Lyon,O=Cheese,CN=*.cheese.org",NB=1531900816,NA=1563436816,SAN=*.cheese.org,*.cheese.net,cheese.in,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2```
```
Subject="C=FR,ST=SomeState,L=Lyon,O=Cheese,CN=*.cheese.org",NB=1531900816,NA=1563436816,SAN=*.cheese.org,*.cheese.net,cheese.in,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2
```

Note these options work only with certificates issued by CAs included in the applicable [EntryPoint ClientCA section](/configuration/entrypoints/#tls-mutual-authentication); certificates from other CAs are not parsed or passed through as-is.
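As a quick illustration of how a backend application can consume these headers, here is a minimal Go handler that reads them; only the header names come from the documentation above, everything else (handler, port) is a hypothetical sketch:

```go
package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        // Escaped PEM of the client certificate, added when `pem` is enabled.
        cert := r.Header.Get("X-Forwarded-Tls-Client-Cert")
        // Escaped string with the selected certificate fields, added when any `infos` flag is enabled.
        infos := r.Header.Get("X-Forwarded-Tls-Client-Cert-Infos")
        fmt.Fprintf(w, "cert header present: %t\ninfos: %s\n", cert != "", infos)
    })
    log.Fatal(http.ListenAndServe(":8080", nil))
}
```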
<4> `traefik.ingress.kubernetes.io/rate-limit` example:

@@ -231,7 +243,7 @@ rateset:
```

<5> `traefik.ingress.kubernetes.io/rule-type`
Note: `ReplacePath` is deprecated in this annotation, use the `traefik.ingress.kubernetes.io/request-modifier` annotation instead. Default: `PathPrefix`.

<6> `traefik.ingress.kubernetes.io/service-weights`:
Service weights enable splitting traffic across multiple backing services in a fine-grained manner.
@@ -329,7 +341,7 @@ The following security annotations are applicable on the Ingress object:
| `ingress.kubernetes.io/ssl-temporary-redirect: "true"` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `ingress.kubernetes.io/ssl-host: HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `ingress.kubernetes.io/ssl-force-host: "true"` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `ingress.kubernetes.io/ssl-proxy-headers: EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`). Format: <code>HEADER:value||HEADER2:value2</code> |
| `ingress.kubernetes.io/ssl-proxy-headers: EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`). Format: <code>HEADER:value||HEADER2:value2</code> |

### Authentication
@@ -218,6 +218,9 @@ The following labels can be defined on Marathon applications. They adjust the be
| `traefik.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions |
| `traefik.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `traefik.backend.loadbalancer.sticky=true` | Enables backend sticky sessions (DEPRECATED) |
| `traefik.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Sets the function to be used against the request to determine what to limit maximum connections to the backend by.<br>Must be used in conjunction with the above label to take effect. |

@@ -303,7 +306,7 @@ The following labels can be defined on Marathon applications. They adjust the be
| `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `traefik.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `traefik.frontend.headers.SSLForceHost=true` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `traefik.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `traefik.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -132,6 +132,9 @@ The following labels can be defined on Mesos tasks. They adjust the behavior for
| `traefik.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions |
| `traefik.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `traefik.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Sets the function to be used against the request to determine what to limit maximum connections to the backend by.<br>Must be used in conjunction with the above label to take effect. |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication to this frontend in CSV format: `User:Hash,User:Hash` (DEPRECATED). |

@@ -215,7 +218,7 @@ The following labels can be defined on Mesos tasks. They adjust the behavior for
| `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `traefik.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `traefik.frontend.headers.SSLForceHost=true` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `traefik.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `traefik.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -167,6 +167,9 @@ Labels can be used on task containers to override default behavior:
| `traefik.backend.loadbalancer.method=drr` | Overrides the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enables backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Sets the cookie name manually for sticky sessions |
| `traefik.backend.loadbalancer.stickiness.secure=true` | Sets secure cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.httpOnly=true` | Sets http only cookie option for sticky sessions. |
| `traefik.backend.loadbalancer.stickiness.sameSite=none` | Sets same site cookie option for sticky sessions. (`none`, `lax`, `strict`) |
| `traefik.backend.loadbalancer.sticky=true` | Enables backend sticky sessions (DEPRECATED) |
| `traefik.backend.maxconn.amount=10` | Sets a maximum number of connections to the backend.<br>Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Sets the function to be used against the request to determine what to limit maximum connections to the backend by.<br>Must be used in conjunction with the above label to take effect. |

@@ -250,7 +253,7 @@ Labels can be used on task containers to override default behavior:
| `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `traefik.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `traefik.frontend.headers.SSLForceHost=true` | If `SSLForceHost` is `true` and `SSLHost` is set, requests will be forced to use `SSLHost` even the ones that are already using SSL. Default is false. |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `traefik.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `traefik.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -145,7 +145,7 @@ Labels, set through extensions or the property manager, can be used on services
| `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. |
| `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. |
| `traefik.frontend.headers.SSLHost=HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.SSLProxyHeaders=EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-Proto:https`).<br>Format: <code>HEADER:value||HEADER2:value2</code> |
| `traefik.frontend.headers.STSSeconds=315360000` | Sets the max-age of the STS header. |
| `traefik.frontend.headers.STSIncludeSubdomains=true` | Adds the `IncludeSubdomains` section of the STS header. |
| `traefik.frontend.headers.STSPreload=true` | Adds the preload flag to the STS header. |
@@ -249,8 +249,14 @@ Multiple sets of rates can be added to each frontend, but the time periods must
|
||||
```
|
||||
|
||||
In the above example, frontend1 is configured to limit requests by the client's ip address.
|
||||
An average of 100 requests every 10 seconds is allowed and an average of 5 requests every 3 seconds.
|
||||
These can "burst" up to 200 and 10 in each period respectively.
|
||||
A sustained rate of 100 requests every 10 seconds (10 req/s) is allowed for rateset1, and 5 requests every 3 seconds (~1.67 req/s) for rateset2.
|
||||
In addition, these can "burst" up to 200 and 10 in each period respectively.
|
||||
|
||||
Another way to describe the above parameters, is to use the [leaky bucket](https://en.wikipedia.org/wiki/Leaky_bucket) analogy:
|
||||
for rateset1, the size of the bucket is 200 drops, and it is leaking at a rate of 10 drop/s.
|
||||
If the incoming rate of drops falling into the bucket gets high enough that the bucket gets filled,
|
||||
any subsequent drop overflows out of the bucket (i.e. the request is discarded).
|
||||
This situation persists until the incoming rate drops low enough, and stays low long enough, for the water level in the bucket to go down.
|
||||
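The arithmetic in the analogy can be checked with a few lines of Go. This is only a sketch of the leaky-bucket behaviour for rateset1, not Traefik's actual rate-limiting middleware:

```go
package main

import (
	"fmt"
	"time"
)

// bucket models the leaky-bucket analogy above: capacity is the burst size
// (200 for rateset1) and leakRate is the sustained rate (10 drops per second).
type bucket struct {
	capacity float64
	leakRate float64
	level    float64
	last     time.Time
}

// allow leaks the bucket for the elapsed time, then tries to add one drop.
// It returns false when that drop would overflow, i.e. the request is discarded.
func (b *bucket) allow(now time.Time) bool {
	b.level -= now.Sub(b.last).Seconds() * b.leakRate
	if b.level < 0 {
		b.level = 0
	}
	b.last = now

	if b.level+1 > b.capacity {
		return false
	}
	b.level++
	return true
}

func main() {
	// rateset1 from the example: 100 requests / 10s on average (10 req/s), burst 200.
	b := &bucket{capacity: 200, leakRate: 10, last: time.Now()}

	allowed := 0
	now := b.last
	for i := 0; i < 250; i++ { // 250 requests arriving at the same instant
		if b.allow(now) {
			allowed++
		}
	}
	fmt.Println("allowed out of 250:", allowed) // 200, the burst size
}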
|
||||
Valid values for `extractorfunc` are:
|
||||
* `client.ip`
|
||||
|
||||
@@ -97,7 +97,7 @@ In compose file the entrypoint syntax is different. Notice how quotes are used:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik
|
||||
image: traefik:v1.7
|
||||
command:
|
||||
- --defaultentrypoints=powpow
|
||||
- "--entryPoints=Name:powpow Address::42 Compress:true"
|
||||
@@ -105,7 +105,7 @@ traefik:
|
||||
or
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik
|
||||
image: traefik:v1.7
|
||||
command: --defaultentrypoints=powpow --entryPoints='Name:powpow Address::42 Compress:true'
|
||||
```
|
||||
|
||||
@@ -239,11 +239,14 @@ TLS Mutual Authentication can be `optional` or not.
|
||||
* If `optional = true`, if a certificate is provided, verifies if it is signed by a specified Certificate Authority (CA). Otherwise proceeds without any certificate.
|
||||
* If `optional = false`, Traefik will only accept clients that present a certificate signed by a specified Certificate Authority (CA).
|
||||
|
||||
!!! warning
|
||||
While the TLS [1.1](https://tools.ietf.org/html/rfc4346#section-7.4.6) and [1.2](https://tools.ietf.org/html/rfc5246#section-7.4.6) RFCs specify that clients should proceed with handshaking by sending an empty list should they have no certs for the CAs specified by the server, not all do so in practice.
|
||||
Use this feature with caution should you require maximum compatibility with a wide variety of client user agents which may not strictly implement these specs.
|
||||
|
||||
`ClientCAFiles` can be configured with multiple CAs in the same file, or with multiple files each containing one or several CAs.
|
||||
The CAs have to be in PEM format.
|
||||
|
||||
By default, `ClientCAFiles` is not optional, all clients will be required to present a valid cert.
|
||||
The requirement will apply to all server certs in the entrypoint.
|
||||
By default, `ClientCAFiles` is not optional, all clients will be required to present a valid cert. The requirement will apply to all server certs in the entrypoint.
|
||||
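For intuition only, the `optional` flag maps onto the client-auth policy of a standard Go `tls.Config`: `optional = false` corresponds to `RequireAndVerifyClientCert`, `optional = true` to `VerifyClientCertIfGiven`. The following is a sketch, not Traefik's own entrypoint wiring, and the CA file names are placeholders:

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"
)

// clientCAConfig builds a TLS config that verifies client certificates
// against the given PEM CA files, mirroring what ClientCAFiles describes.
func clientCAConfig(caFiles []string, optional bool) (*tls.Config, error) {
	pool := x509.NewCertPool()
	for _, f := range caFiles {
		pem, err := ioutil.ReadFile(f)
		if err != nil {
			return nil, err
		}
		if ok := pool.AppendCertsFromPEM(pem); !ok {
			log.Printf("no certificates found in %s", f)
		}
	}

	authType := tls.RequireAndVerifyClientCert // optional = false
	if optional {
		authType = tls.VerifyClientCertIfGiven // optional = true
	}

	return &tls.Config{ClientCAs: pool, ClientAuth: authType}, nil
}

func main() {
	cfg, err := clientCAConfig([]string{"ca1.pem", "ca2.pem"}, false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("client auth mode: %v", cfg.ClientAuth)
}
```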
|
||||
In the example below both `snitest.com` and `snitest.org` will require client certs
|
||||
|
||||
@@ -482,6 +485,13 @@ To enable IP white listing at the entry point level.
|
||||
# useXForwardedFor = true
|
||||
```
|
||||
|
||||
By setting the `useXForwardedFor` option, the `sourceRange` addresses will be matched against the request header `X-Forwarded-For` address list, from left to right.
|
||||
|
||||
!!! danger
|
||||
When using Traefik behind another load-balancer, its own internal address will be appended in the `X-Forwarded-For` header.
|
||||
Be sure to carefully configure the `sourceRange` as adding the internal network CIDR,
|
||||
or the load-balancer address directly, will cause all requests coming from it to pass through.
|
||||
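One way to picture that left-to-right check is the standalone sketch below, written under the assumption that a request is allowed as soon as any listed address falls inside a whitelisted range — which is exactly why the warning above matters:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// allowedByXFF walks the X-Forwarded-For list from left to right and reports
// whether any address falls inside one of the whitelisted source ranges.
func allowedByXFF(xff string, sourceRange []string) bool {
	var nets []*net.IPNet
	for _, cidr := range sourceRange {
		if _, n, err := net.ParseCIDR(cidr); err == nil {
			nets = append(nets, n)
		}
	}

	for _, part := range strings.Split(xff, ",") {
		ip := net.ParseIP(strings.TrimSpace(part))
		if ip == nil {
			continue
		}
		for _, n := range nets {
			if n.Contains(ip) {
				return true
			}
		}
	}
	return false
}

func main() {
	// The client IP comes first; an upstream load-balancer appends its own address last.
	xff := "203.0.113.7, 10.0.0.2"
	fmt.Println(allowedByXFF(xff, []string{"203.0.113.0/24"})) // true: the real client matches
	fmt.Println(allowedByXFF(xff, []string{"10.0.0.0/8"}))     // true: the LB's own address matches, the pitfall described above
}
```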
|
||||
## ProxyProtocol
|
||||
|
||||
To enable [ProxyProtocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) support.
|
||||
|
||||
@@ -105,10 +105,10 @@ logLevel = "ERROR"
|
||||
|
||||
## Access Logs
|
||||
|
||||
Access logs are written when `[accessLog]` is defined.
|
||||
By default it will write to stdout and produce logs in the textual [Common Log Format (CLF)](#clf-common-log-format), extended with additional fields.
|
||||
Access logs are written when the entry `[accessLog]` is defined (or the command line flag `--accesslog`).
|
||||
By default it writes to stdout and produces logs in the textual [Common Log Format (CLF)](#clf-common-log-format), extended with additional fields.
|
||||
|
||||
To enable access logs using the default settings just add the `[accessLog]` entry:
|
||||
To enable access logs using the default settings, add the `[accessLog]` entry in your `traefik.toml` configuration file:
|
||||
|
||||
```toml
|
||||
[accessLog]
|
||||
@@ -175,21 +175,41 @@ format = "json" # Default: "common"
|
||||
minDuration = "10ms"
|
||||
```
|
||||
|
||||
To customize logs format, you must switch to the JSON format:
|
||||
### CLF - Common Log Format
|
||||
|
||||
By default, Traefik uses the CLF (`common`) access log format.
|
||||
|
||||
```html
|
||||
<remote_IP_address> - <client_user_name_if_available> [<timestamp>] "<request_method> <request_path> <request_protocol>" <origin_server_HTTP_status> <origin_server_content_size> "<request_referrer>" "<request_user_agent>" <number_of_requests_received_since_Traefik_started> "<Traefik_frontend_name>" "<Traefik_backend_URL>" <request_duration_in_ms>ms
|
||||
```
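Filled in with invented values, a single line in that format looks like the output of this small snippet (every value below is made up for illustration):

```go
package main

import "fmt"

func main() {
	// Invented values, purely to illustrate the CLF layout documented above.
	fmt.Printf("%s - %s [%s] %q %d %d %q %q %d %q %q %dms\n",
		"10.0.0.10", "-", "02/Jan/2020:15:04:05 +0000",
		"GET /index.html HTTP/1.1", 200, 512,
		"-", "curl/7.64.1", 42, "frontend-Host-example-com", "http://172.17.0.3:80", 3)
}
```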
|
||||
|
||||
### Customize Fields
|
||||
|
||||
You can customize the fields written in the access logs.
|
||||
The list of available fields is found below: [List of All Available Fields](#list-of-all-available-fields).
|
||||
|
||||
Each field has a "mode" which defines if it is written or not in the access log lines.
|
||||
The possible values for the mode are:
|
||||
|
||||
* `keep`: the field and its value are written on the access log line. This is the default behavior.
|
||||
* `drop`: the field is not written at all on the access log.
|
||||
|
||||
To customize the fields, you must:
|
||||
|
||||
* Switch to the JSON format (mandatory)
|
||||
* Define the "default mode" for all fields (default is `keep`)
|
||||
* OR Define the fields which do not follow the default mode
|
||||
|
||||
```toml
|
||||
[accessLog]
|
||||
filePath = "/path/to/access.log"
|
||||
format = "json" # Default: "common"
|
||||
|
||||
[accessLog.filters]
|
||||
|
||||
# statusCodes keep only access logs with status codes in the specified range
|
||||
#
|
||||
# Optional
|
||||
# Default: []
|
||||
#
|
||||
statusCodes = ["200", "300-302"]
|
||||
# Access Log Format
|
||||
#
|
||||
# Optional
|
||||
# Default: "common"
|
||||
#
|
||||
# Accepted values "common", "json"
|
||||
#
|
||||
format = "json"
|
||||
|
||||
[accessLog.fields]
|
||||
|
||||
@@ -206,6 +226,43 @@ format = "json" # Default: "common"
|
||||
[accessLog.fields.names]
|
||||
"ClientUsername" = "drop"
|
||||
# ...
|
||||
```
|
||||
|
||||
### Customize Headers
|
||||
|
||||
The access log prints the headers of each request as fields of the access log line.
|
||||
You can customize which headers are printed and how, in the same way as the other fields (see the ["Customize Fields" section](#customize-fields)).
|
||||
|
||||
Each header has a "mode" which defines how it is written in the access log lines.
|
||||
The possible values for the mode are:
|
||||
|
||||
* `keep`: the header and its value are written on the access log line. This is the default behavior.
|
||||
* `drop`: the header is not written at all on the access log.
|
||||
* `redacted`: the header is written, but its value is redacted to avoid leaking sensitive information.
|
||||
|
||||
To customize the headers, you must:
|
||||
|
||||
* Switch to the JSON format (mandatory)
|
||||
* Define the "default mode" for all headers (default is `keep`)
|
||||
* OR Define the headers which do not follow the default mode
|
||||
|
||||
!!! important
|
||||
The headers are written with the prefix `request_` in the access log.
|
||||
This prefix must not be included when specifying a header in the TOML configuration.
|
||||
|
||||
* Do: `"User-Agent" = "drop"`
|
||||
* Don't: `"request_User-Agent" = "drop"`
|
||||
|
||||
```toml
|
||||
[accessLog]
|
||||
# Access Log Format
|
||||
#
|
||||
# Optional
|
||||
# Default: "common"
|
||||
#
|
||||
# Accepted values "common", "json"
|
||||
#
|
||||
format = "json"
|
||||
|
||||
[accessLog.fields.headers]
|
||||
# defaultMode
|
||||
@@ -224,7 +281,7 @@ format = "json" # Default: "common"
|
||||
# ...
|
||||
```
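The three modes boil down to a per-header switch when the log line is assembled. A minimal sketch follows; the function name and the `REDACTED` placeholder are illustrative, not Traefik's internal API:

```go
package main

import (
	"fmt"
	"net/http"
)

// applyHeaderModes returns the headers as they would appear in the access log,
// given a default mode and per-header overrides (keep, drop, redacted).
func applyHeaderModes(h http.Header, defaultMode string, overrides map[string]string) map[string]string {
	out := map[string]string{}
	for name, values := range h {
		mode := defaultMode
		if m, ok := overrides[name]; ok {
			mode = m
		}
		switch mode {
		case "drop":
			// not written at all
		case "redacted":
			out["request_"+name] = "REDACTED"
		default: // "keep"
			out["request_"+name] = values[0]
		}
	}
	return out
}

func main() {
	h := http.Header{}
	h.Set("User-Agent", "curl/7.64.1")
	h.Set("Authorization", "Bearer secret-token")

	logged := applyHeaderModes(h, "keep", map[string]string{"Authorization": "redacted"})
	fmt.Println(logged) // map[request_Authorization:REDACTED request_User-Agent:curl/7.64.1]
}
```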
|
||||
|
||||
### List of all available fields
|
||||
### List of All Available Fields
|
||||
|
||||
| Field | Description |
|
||||
|-------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
@@ -259,6 +316,8 @@ format = "json" # Default: "common"
|
||||
| `Overhead` | The processing time overhead caused by Traefik. |
|
||||
| `RetryAttempts` | The amount of attempts the request was retried. |
|
||||
|
||||
### Deprecation Notice
|
||||
|
||||
Deprecated way (before 1.4):
|
||||
|
||||
!!! danger "DEPRECATED"
|
||||
@@ -272,14 +331,6 @@ Deprecated way (before 1.4):
|
||||
accessLogsFile = "log/access.log"
|
||||
```
|
||||
|
||||
### CLF - Common Log Format
|
||||
|
||||
By default, Traefik use the CLF (`common`) as access log format.
|
||||
|
||||
```html
|
||||
<remote_IP_address> - <client_user_name_if_available> [<timestamp>] "<request_method> <request_path> <request_protocol>" <origin_server_HTTP_status> <origin_server_content_size> "<request_referrer>" "<request_user_agent>" <number_of_requests_received_since_Traefik_started> "<Traefik_frontend_name>" "<Traefik_backend_URL>" <request_duration_in_ms>ms
|
||||
```
|
||||
|
||||
## Log Rotation
|
||||
|
||||
Traefik will close and reopen its log files, assuming they're configured, on receipt of a USR1 signal.
|
||||
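The mechanism behind this is a plain SIGUSR1 listener calling a rotation hook such as the `Rotate` method shown further down in this diff. A minimal sketch of that wiring, with a hypothetical `rotator` interface standing in for the access-log handler:

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
)

// rotator stands in for an access-log handler with a Rotate method
// (a hypothetical interface for this sketch).
type rotator interface {
	Rotate() error
}

// watchUSR1 reopens the log file whenever the process receives SIGUSR1,
// which is how an external tool such as logrotate signals the rotation.
func watchUSR1(r rotator) {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGUSR1)
	go func() {
		for range ch {
			if err := r.Rotate(); err != nil {
				log.Printf("log rotation failed: %v", err)
			}
		}
	}()
}

type noopRotator struct{}

func (noopRotator) Rotate() error { return nil }

func main() {
	watchUSR1(noopRotator{})
	select {} // keep the process alive so the signal can be delivered
}
```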
|
||||
@@ -77,7 +77,7 @@ version: '3'
|
||||
|
||||
services:
|
||||
reverse-proxy:
|
||||
image: traefik # The official Traefik docker image
|
||||
image: traefik:v1.7 # The official Traefik docker image
|
||||
command: --api --docker # Enables the web UI and tells Traefik to listen to docker
|
||||
ports:
|
||||
- "80:80" # The HTTP port
|
||||
|
||||
7
docs/theme/partials/integrations/analytics.html
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
<!-- Google Tag Manager -->
|
||||
<script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
|
||||
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
|
||||
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
|
||||
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
|
||||
})(window,document,'script','dataLayer','GTM-NMWC63S');</script>
|
||||
<!-- End Google Tag Manager -->
|
||||
@@ -91,7 +91,7 @@ To watch docker events, add `--docker.watch`.
|
||||
version: "3"
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
command:
|
||||
- "--api"
|
||||
- "--entrypoints=Name:http Address::80 Redirect.EntryPoint:https"
|
||||
@@ -156,7 +156,7 @@ The initializer in a docker-compose file will be:
|
||||
|
||||
```yaml
|
||||
traefik_init:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
command:
|
||||
- "storeconfig"
|
||||
- "--api"
|
||||
@@ -177,7 +177,7 @@ And now, the Traefik part will only have the Consul configuration.
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
depends_on:
|
||||
- traefik_init
|
||||
- consul
|
||||
@@ -200,7 +200,7 @@ The new configuration will be stored in Consul, and you need to restart the Trae
|
||||
version: "3.4"
|
||||
services:
|
||||
traefik_init:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
command:
|
||||
- "storeconfig"
|
||||
- "--api"
|
||||
@@ -229,7 +229,7 @@ services:
|
||||
depends_on:
|
||||
- consul
|
||||
traefik:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
depends_on:
|
||||
- traefik_init
|
||||
- consul
|
||||
|
||||
@@ -50,7 +50,7 @@ version: '2'
|
||||
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
restart: always
|
||||
ports:
|
||||
- 80:80
|
||||
@@ -235,13 +235,11 @@ Let's take a look at the labels themselves for the `app` service, which is a HTT
|
||||
- "traefik.admin.port=9443"
|
||||
```
|
||||
|
||||
We use both `container labels` and `service labels`.
|
||||
We use both `container labels` and `segment labels`.
|
||||
|
||||
#### Container labels
|
||||
|
||||
First, we specify the `backend` name which corresponds to the actual service we're routing **to**.
|
||||
|
||||
We also tell Traefik to use the `web` network to route HTTP traffic to this container.
|
||||
We tell Traefik to use the `web` network to route HTTP traffic to this container.
|
||||
With the `traefik.enable` label, we tell Traefik to include this container in its internal configuration.
|
||||
|
||||
With the `frontend.rule` label, we tell Traefik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`.
|
||||
@@ -249,20 +247,20 @@ Essentially, this is the actual rule used for Layer-7 load balancing.
|
||||
|
||||
Finally but not unimportantly, we tell Traefik to route **to** port `9000`, since that is the actual TCP/IP port the container actually listens on.
|
||||
|
||||
### Service labels
|
||||
#### Segment labels
|
||||
|
||||
`Service labels` allow managing many routes for the same container.
|
||||
`Segment labels` allow managing many routes for the same container.
|
||||
|
||||
When both `container labels` and `service labels` are defined, `container labels` are just used as default values for missing `service labels` but no frontend/backend are going to be defined only with these labels.
|
||||
Obviously, labels `traefik.frontend.rule` and `traefik.port` described above, will only be used to complete information set in `service labels` during the container frontends/backends creation.
|
||||
When both `container labels` and `segment labels` are defined, `container labels` are just used as default values for missing `segment labels` but no frontend/backend are going to be defined only with these labels.
|
||||
Obviously, labels `traefik.frontend.rule` and `traefik.port` described above, will only be used to complete information set in `segment labels` during the container frontends/backends creation.
|
||||
|
||||
In the example, two service names are defined : `basic` and `admin`.
|
||||
In the example, two segment names are defined: `basic` and `admin`.
|
||||
They allow creating two frontends and two backends.
|
||||
|
||||
- `basic` has only one `service label` : `traefik.basic.protocol`.
|
||||
- `basic` has only one `segment label` : `traefik.basic.protocol`.
|
||||
Traefik will use values set in `traefik.frontend.rule` and `traefik.port` to create the `basic` frontend and backend.
|
||||
The frontend listens to incoming HTTP requests which contain the `Host` `app.my-awesome-app.org` and redirect them in `HTTP` to the port `9000` of the backend.
|
||||
- `admin` has all the `services labels` needed to create the `admin` frontend and backend (`traefik.admin.frontend.rule`, `traefik.admin.protocol`, `traefik.admin.port`).
|
||||
- `admin` has all the `segment labels` needed to create the `admin` frontend and backend (`traefik.admin.frontend.rule`, `traefik.admin.protocol`, `traefik.admin.port`).
|
||||
Traefik will create a frontend to listen to incoming HTTP requests which contain the `Host` `admin-app.my-awesome-app.org` and redirect them in `HTTPS` to the port `9443` of the backend.
|
||||
|
||||
#### Gotchas and tips
|
||||
|
||||
@@ -10,10 +10,14 @@ The config files used in this guide can be found in the [examples directory](htt
|
||||
|
||||
1. A working Kubernetes cluster. If you want to follow along with this guide, you should set up [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) on your machine, as it is the quickest way to get a local Kubernetes cluster set up for experimentation and development.
|
||||
|
||||
2. Set up ingress as an add-on. It can be enabled with the following command:
|
||||
|
||||
`minikube addons enable ingress`
|
||||
|
||||
!!! note
|
||||
The guide is likely not fully adequate for a production-ready setup.
|
||||
|
||||
2. The `kubectl` binary should be [installed on your workstation](https://kubernetes.io/docs/getting-started-guides/minikube/#download-kubectl).
|
||||
3. The `kubectl` binary should be [installed on your workstation](https://kubernetes.io/docs/getting-started-guides/minikube/#download-kubectl).
|
||||
|
||||
### Role Based Access Control configuration (Kubernetes 1.6+ only)
|
||||
|
||||
@@ -53,6 +57,12 @@ rules:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
@@ -98,7 +108,7 @@ metadata:
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
@@ -118,7 +128,7 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
- image: traefik:v1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
@@ -164,13 +174,17 @@ metadata:
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -180,7 +194,7 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
- image: traefik:v1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
@@ -188,6 +202,7 @@ spec:
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
@@ -358,7 +373,7 @@ spec:
|
||||
kubectl apply -f https://raw.githubusercontent.com/containous/traefik/v1.7/examples/k8s/ui.yaml
|
||||
```
|
||||
|
||||
Now lets setup an entry in our `/etc/hosts` file to route `traefik-ui.minikube` to our cluster.
|
||||
Now let's setup an entry in our `/etc/hosts` file to route `traefik-ui.minikube` to our cluster.
|
||||
|
||||
In production you would want to set up real DNS entries.
|
||||
You can get the IP address of your minikube instance by running `minikube ip`:
|
||||
@@ -382,6 +397,23 @@ You can add a TLS entrypoint by adding the following `args` to the container spe
|
||||
--entrypoints=Name:https Address::443 TLS
|
||||
--entrypoints=Name:http Address::80
|
||||
```
|
||||
|
||||
Now let's add the TLS port either to the deployment:
|
||||
|
||||
```
|
||||
ports:
|
||||
- name: https
|
||||
containerPort: 443
|
||||
```
|
||||
|
||||
or to the daemon set:
|
||||
|
||||
```
|
||||
ports:
|
||||
- name: https
|
||||
containerPort: 443
|
||||
hostPort: 443
|
||||
```
|
||||
|
||||
To setup an HTTPS-protected ingress, you can leverage the TLS feature of the ingress resource.
|
||||
|
||||
@@ -503,7 +535,7 @@ First lets start by launching the pods for the cheese websites.
|
||||
```yaml
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: stilton
|
||||
labels:
|
||||
@@ -529,7 +561,7 @@ spec:
|
||||
- containerPort: 80
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: cheddar
|
||||
labels:
|
||||
@@ -555,7 +587,7 @@ spec:
|
||||
- containerPort: 80
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: wensleydale
|
||||
labels:
|
||||
|
||||
@@ -139,7 +139,7 @@ Here is the [docker-compose file](https://docs.docker.com/compose/compose-file/)
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik:<stable version from https://hub.docker.com/_/traefik>
|
||||
image: traefik:<stable v1.7 from https://hub.docker.com/_/traefik>
|
||||
command: --consul --consul.endpoint=127.0.0.1:8500
|
||||
ports:
|
||||
- "80:80"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
traefik:
|
||||
image: traefik
|
||||
image: traefik:v1.7
|
||||
command: --api --rancher --rancher.domain=rancher.localhost --rancher.endpoint=http://example.com --rancher.accesskey=XXXXXXX --rancher.secretkey=YYYYYY --logLevel=DEBUG
|
||||
ports:
|
||||
- "80:80"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
traefik:
|
||||
image: traefik
|
||||
image: traefik:v1.7
|
||||
command: -c /dev/null --api --docker --docker.domain=docker.localhost --logLevel=DEBUG
|
||||
ports:
|
||||
- "80:80"
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
@@ -26,7 +26,7 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
- image: traefik:v1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
@@ -6,13 +6,17 @@ metadata:
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: extensions/v1beta1
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
@@ -22,7 +26,7 @@ spec:
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
- image: traefik:v1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
|
||||
@@ -13,7 +13,7 @@ version: '3'
|
||||
|
||||
services:
|
||||
reverse-proxy:
|
||||
image: traefik # The official Traefik docker image
|
||||
image: traefik:v1.7 # The official Traefik docker image
|
||||
command: --api --docker # Enables the web UI and tells Traefik to listen to docker
|
||||
ports:
|
||||
- "80:80" # The HTTP port
|
||||
@@ -101,7 +101,7 @@ IP: 172.27.0.4
|
||||
|
||||
### 4 — Enjoy Traefik's Magic
|
||||
|
||||
Now that you have a basic understanding of how Traefik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/) and let Traefik work for you!
|
||||
Whatever your infrastructure is, there is probably [an available Traefik backend](https://docs.traefik.io/#supported-backends) that will do the job.
|
||||
Now that you have a basic understanding of how Traefik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/v1.7/) and let Traefik work for you!
|
||||
Whatever your infrastructure is, there is probably [an available Traefik backend](https://docs.traefik.io/v1.7/#supported-backends) that will do the job.
|
||||
|
||||
Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Traefik's let's encrypt integration](https://docs.traefik.io/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/user-guide/docker-and-lets-encrypt/).
|
||||
Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Traefik's let's encrypt integration](https://docs.traefik.io/v1.7/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/v1.7/user-guide/docker-and-lets-encrypt/).
|
||||
|
||||
@@ -3,7 +3,7 @@ version: '3'
|
||||
services:
|
||||
# The reverse proxy service (Traefik)
|
||||
reverse-proxy:
|
||||
image: traefik # The official Traefik docker image
|
||||
image: traefik:v1.7 # The official Traefik docker image
|
||||
command: --api --docker # Enables the web UI and tells Traefik to listen to docker
|
||||
ports:
|
||||
- "80:80" # The HTTP port
|
||||
|
||||
@@ -12,7 +12,7 @@ RUN yarn install
|
||||
RUN npm run build
|
||||
|
||||
# BUILD
|
||||
FROM golang:1.11-alpine as gobuild
|
||||
FROM golang:1.14-alpine as gobuild
|
||||
|
||||
RUN apk --update upgrade \
|
||||
&& apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar ca-certificates tzdata \
|
||||
|
||||
28
h2c/h2c.go
@@ -57,20 +57,20 @@ func (s Server) Serve(l net.Listener) error {
|
||||
if http2VerboseLogs {
|
||||
log.Debugf("Attempting h2c with prior knowledge.")
|
||||
}
|
||||
conn, err := initH2CWithPriorKnowledge(w)
|
||||
conn, err := initH2CWithPriorKnowledge(w, s.closeHijackedConnQuietly)
|
||||
if err != nil {
|
||||
if http2VerboseLogs {
|
||||
log.Debugf("Error h2c with prior knowledge: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
defer s.closeHijackedConnQuietly(conn)
|
||||
h2cSrv := &http2.Server{}
|
||||
h2cSrv.ServeConn(conn, &http2.ServeConnOpts{Handler: originalHandler})
|
||||
return
|
||||
}
|
||||
if conn, err := h2cUpgrade(w, r); err == nil {
|
||||
defer conn.Close()
|
||||
if conn, err := h2cUpgrade(w, r, s.closeHijackedConnQuietly); err == nil {
|
||||
defer s.closeHijackedConnQuietly(conn)
|
||||
h2cSrv := &http2.Server{}
|
||||
h2cSrv.ServeConn(conn, &http2.ServeConnOpts{Handler: originalHandler})
|
||||
return
|
||||
@@ -80,12 +80,24 @@ func (s Server) Serve(l net.Listener) error {
|
||||
return s.Server.Serve(l)
|
||||
}
|
||||
|
||||
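// closeHijackedConnQuietly marks a hijacked h2c connection as closed in the
// server's ConnState tracking (unwrapping the rwConn wrapper if needed) and
// then closes it, logging any close error at debug level only.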
func (s Server) closeHijackedConnQuietly(conn net.Conn) {
|
||||
connStateKey := conn
|
||||
if rwConn, ok := conn.(*rwConn); ok {
|
||||
connStateKey = rwConn.Conn
|
||||
}
|
||||
|
||||
s.ConnState(connStateKey, http.StateClosed)
|
||||
if err := conn.Close(); err != nil {
|
||||
log.Debugf("Error closing hijacked connection: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// initH2CWithPriorKnowledge implements creating a h2c connection with prior
|
||||
// knowledge (Section 3.4) and creates a net.Conn suitable for http2.ServeConn.
|
||||
// All we have to do is look for the client preface that is supposed to be part
|
||||
// of the body, and reforward the client preface on the net.Conn this function
|
||||
// creates.
|
||||
func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
|
||||
func initH2CWithPriorKnowledge(w http.ResponseWriter, onFailureAfterHijack func(conn net.Conn)) (net.Conn, error) {
|
||||
hijacker, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
return nil, errors.New("hijack not supported")
|
||||
@@ -100,6 +112,7 @@ func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
|
||||
buf := make([]byte, len(expectedBody))
|
||||
n, err := io.ReadFull(rw, buf)
|
||||
if err != nil {
|
||||
onFailureAfterHijack(conn)
|
||||
return nil, fmt.Errorf("fail to read body: %v", err)
|
||||
}
|
||||
|
||||
@@ -112,7 +125,7 @@ func initH2CWithPriorKnowledge(w http.ResponseWriter) (net.Conn, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
onFailureAfterHijack(conn)
|
||||
if http2VerboseLogs {
|
||||
log.Printf(
|
||||
"Missing the request body portion of the client preface. Wanted: %v Got: %v",
|
||||
@@ -139,7 +152,7 @@ func drainClientPreface(r io.Reader) error {
|
||||
}
|
||||
|
||||
// h2cUpgrade establishes a h2c connection using the HTTP/1 upgrade (Section 3.2).
|
||||
func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
|
||||
func h2cUpgrade(w http.ResponseWriter, r *http.Request, onFailureAfterHijack func(conn net.Conn)) (net.Conn, error) {
|
||||
if !isH2CUpgrade(r.Header) {
|
||||
return nil, errors.New("non-conforming h2c headers")
|
||||
}
|
||||
@@ -167,6 +180,7 @@ func h2cUpgrade(w http.ResponseWriter, r *http.Request) (net.Conn, error) {
|
||||
// A conforming client will now send an H2 client preface which need to drain
|
||||
// since we already sent this.
|
||||
if err := drainClientPreface(rw); err != nil {
|
||||
onFailureAfterHijack(conn)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
||||
@@ -341,24 +341,32 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 500*time.Millisecond, try.BodyContains("Host:snitest.org"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
req.Host = "snitest.com"
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: "snitest.com",
|
||||
Certificates: []tls.Certificate{},
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
// Connection without client certificate should fail
|
||||
_, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.NotNil, check.Commentf("should not be allowed to connect to server"))
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.NotNil)
|
||||
|
||||
// Connect with client signed by ca1
|
||||
cert, err := tls.LoadX509KeyPair("fixtures/https/clientca/client1.crt", "fixtures/https/clientca/client1.key")
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
conn, err := tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
|
||||
|
||||
conn.Close()
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Connect with client signed by ca2
|
||||
tlsConfig = &tls.Config{
|
||||
@@ -370,10 +378,13 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
conn, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
|
||||
client = http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
conn.Close()
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Connect with client signed by ca3 should fail
|
||||
tlsConfig = &tls.Config{
|
||||
@@ -385,8 +396,13 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAs(c *check.
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
_, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.NotNil, check.Commentf("should not be allowed to connect to server"))
|
||||
client = http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.NotNil)
|
||||
}
|
||||
|
||||
// TestWithClientCertificateAuthentication
|
||||
@@ -402,24 +418,32 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFi
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1000*time.Millisecond, try.BodyContains("Host:snitest.org"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
req.Host = "snitest.com"
|
||||
|
||||
tlsConfig := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: "snitest.com",
|
||||
Certificates: []tls.Certificate{},
|
||||
}
|
||||
|
||||
client := http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
// Connection without client certificate should fail
|
||||
_, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.NotNil, check.Commentf("should not be allowed to connect to server"))
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.NotNil)
|
||||
|
||||
// Connect with client signed by ca1
|
||||
cert, err := tls.LoadX509KeyPair("fixtures/https/clientca/client1.crt", "fixtures/https/clientca/client1.key")
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
conn, err := tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
|
||||
|
||||
conn.Close()
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Connect with client signed by ca2
|
||||
tlsConfig = &tls.Config{
|
||||
@@ -431,9 +455,13 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFi
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
conn, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server"))
|
||||
conn.Close()
|
||||
client = http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
// Connect with client signed by ca3 should fail
|
||||
tlsConfig = &tls.Config{
|
||||
@@ -445,8 +473,13 @@ func (s *HTTPSSuite) TestWithClientCertificateAuthenticationMultipeCAsMultipleFi
|
||||
c.Assert(err, checker.IsNil, check.Commentf("unable to load client certificate and key"))
|
||||
tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
|
||||
|
||||
_, err = tls.Dial("tcp", "127.0.0.1:4443", tlsConfig)
|
||||
c.Assert(err, checker.NotNil, check.Commentf("should not be allowed to connect to server"))
|
||||
client = http.Client{
|
||||
Transport: &http.Transport{TLSClientConfig: tlsConfig},
|
||||
Timeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
_, err = client.Do(req)
|
||||
c.Assert(err, checker.NotNil)
|
||||
}
|
||||
|
||||
func (s *HTTPSSuite) TestWithRootCAsContentForHTTPSOnBackend(c *check.C) {
|
||||
|
||||
@@ -25,11 +25,6 @@ var host = flag.Bool("host", false, "run host integration tests")
|
||||
var showLog = flag.Bool("tlog", false, "always show Traefik logs")
|
||||
|
||||
func Test(t *testing.T) {
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.Parse()
|
||||
if !*integration {
|
||||
log.Info("Integration tests disabled.")
|
||||
return
|
||||
@@ -70,6 +65,8 @@ func init() {
|
||||
check.Suite(&ProxyProtocolSuite{})
|
||||
check.Suite(&Etcd3Suite{})
|
||||
}
|
||||
|
||||
check.TestingT(t)
|
||||
}
|
||||
|
||||
var traefikBinary = "../dist/traefik"
|
||||
|
||||
@@ -3,6 +3,7 @@ package accesslog
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -32,6 +33,19 @@ const (
|
||||
JSONFormat = "json"
|
||||
)
|
||||
|
||||
type noopCloser struct {
|
||||
*os.File
|
||||
}
|
||||
|
||||
func (n noopCloser) Write(p []byte) (int, error) {
|
||||
return n.File.Write(p)
|
||||
}
|
||||
|
||||
func (n noopCloser) Close() error {
|
||||
// noop
|
||||
return nil
|
||||
}
|
||||
|
||||
type logHandlerParams struct {
|
||||
logDataTable *LogData
|
||||
crr *captureRequestReader
|
||||
@@ -42,7 +56,7 @@ type logHandlerParams struct {
|
||||
type LogHandler struct {
|
||||
config *types.AccessLog
|
||||
logger *logrus.Logger
|
||||
file *os.File
|
||||
file io.WriteCloser
|
||||
mu sync.Mutex
|
||||
httpCodeRanges types.HTTPCodeRanges
|
||||
logHandlerChan chan logHandlerParams
|
||||
@@ -51,7 +65,7 @@ type LogHandler struct {
|
||||
|
||||
// NewLogHandler creates a new LogHandler
|
||||
func NewLogHandler(config *types.AccessLog) (*LogHandler, error) {
|
||||
file := os.Stdout
|
||||
var file io.WriteCloser = noopCloser{os.Stdout}
|
||||
if len(config.FilePath) > 0 {
|
||||
f, err := openAccessLogFile(config.FilePath)
|
||||
if err != nil {
|
||||
@@ -205,14 +219,15 @@ func (l *LogHandler) Close() error {
|
||||
// Rotate closes and reopens the log file to allow for rotation
|
||||
// by an external source.
|
||||
func (l *LogHandler) Rotate() error {
|
||||
var err error
|
||||
|
||||
if l.file != nil {
|
||||
defer func(f *os.File) {
|
||||
f.Close()
|
||||
}(l.file)
|
||||
if l.config.FilePath == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if l.file != nil {
|
||||
defer func(f io.Closer) { _ = f.Close() }(l.file)
|
||||
}
|
||||
|
||||
var err error
|
||||
l.file, err = os.OpenFile(l.config.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -3,8 +3,10 @@ package auth
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
goauth "github.com/abbot/go-http-auth"
|
||||
"github.com/containous/traefik/log"
|
||||
@@ -61,7 +63,10 @@ func NewAuthenticator(authConfig *types.Auth, tracingMiddleware *tracing.Tracing
|
||||
tracingAuth.name = "Auth Digest"
|
||||
tracingAuth.clientSpanKind = false
|
||||
} else if authConfig.Forward != nil {
|
||||
tracingAuth.handler = createAuthForwardHandler(authConfig)
|
||||
tracingAuth.handler, err = createAuthForwardHandler(authConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tracingAuth.name = "Auth Forward"
|
||||
tracingAuth.clientSpanKind = true
|
||||
}
|
||||
@@ -74,10 +79,38 @@ func NewAuthenticator(authConfig *types.Auth, tracingMiddleware *tracing.Tracing
|
||||
return authenticator, nil
|
||||
}
|
||||
|
||||
func createAuthForwardHandler(authConfig *types.Auth) negroni.HandlerFunc {
|
||||
func createAuthForwardHandler(authConfig *types.Auth) (negroni.HandlerFunc, error) {
|
||||
// Ensure our request client does not follow redirects
|
||||
client := http.Client{
|
||||
CheckRedirect: func(r *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
},
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
if authConfig.Forward.TLS != nil {
|
||||
tlsConfig, err := authConfig.Forward.TLS.CreateTLSConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client.Transport = &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
MaxIdleConns: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
}
|
||||
return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
|
||||
Forward(authConfig.Forward, w, r, next)
|
||||
})
|
||||
Forward(authConfig.Forward, client, w, r, next)
|
||||
}), nil
|
||||
}
|
||||
func createAuthDigestHandler(digestAuth *goauth.DigestAuth, authConfig *types.Auth) negroni.HandlerFunc {
|
||||
return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
|
||||
|
||||
@@ -18,28 +18,8 @@ const (
|
||||
xForwardedMethod = "X-Forwarded-Method"
|
||||
)
|
||||
|
||||
// Forward the authentication to a external server
|
||||
func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
|
||||
// Ensure our request client does not follow redirects
|
||||
httpClient := http.Client{
|
||||
CheckRedirect: func(r *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
},
|
||||
}
|
||||
|
||||
if config.TLS != nil {
|
||||
tlsConfig, err := config.TLS.CreateTLSConfig()
|
||||
if err != nil {
|
||||
tracing.SetErrorAndDebugLog(r, "Unable to configure TLS to call %s. Cause %s", config.Address, err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
httpClient.Transport = &http.Transport{
|
||||
TLSClientConfig: tlsConfig,
|
||||
}
|
||||
}
|
||||
|
||||
// Forward the authentication to an external server
|
||||
func Forward(config *types.Forward, httpClient http.Client, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
|
||||
forwardReq, err := http.NewRequest(http.MethodGet, config.Address, http.NoBody)
|
||||
tracing.LogRequest(tracing.GetSpan(r), forwardReq)
|
||||
if err != nil {
|
||||
|
||||
@@ -19,7 +19,10 @@ import (
|
||||
)
|
||||
|
||||
// Compile time validation that the response recorder implements http interfaces correctly.
|
||||
var _ middlewares.Stateful = &responseRecorderWithCloseNotify{}
|
||||
var (
|
||||
_ middlewares.Stateful = &responseRecorderWithCloseNotify{}
|
||||
_ middlewares.Stateful = &codeCatcherWithCloseNotify{}
|
||||
)
|
||||
|
||||
// Handler is a middleware that provides the custom error pages
|
||||
type Handler struct {
|
||||
@@ -74,25 +77,29 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.
|
||||
return
|
||||
}
|
||||
|
||||
recorder := newResponseRecorder(w)
|
||||
next.ServeHTTP(recorder, req)
|
||||
catcher := newCodeCatcher(w, h.httpCodeRanges)
|
||||
next.ServeHTTP(catcher, req)
|
||||
if !catcher.isFilteredCode() {
|
||||
return
|
||||
}
|
||||
|
||||
// check the recorder code against the configured http status code ranges
|
||||
code := catcher.getCode()
|
||||
for _, block := range h.httpCodeRanges {
|
||||
if recorder.GetCode() >= block[0] && recorder.GetCode() <= block[1] {
|
||||
log.Errorf("Caught HTTP Status Code %d, returning error page", recorder.GetCode())
|
||||
if code >= block[0] && code <= block[1] {
|
||||
log.Errorf("Caught HTTP Status Code %d, returning error page", code)
|
||||
|
||||
var query string
|
||||
if len(h.backendQuery) > 0 {
|
||||
query = "/" + strings.TrimPrefix(h.backendQuery, "/")
|
||||
query = strings.Replace(query, "{status}", strconv.Itoa(recorder.GetCode()), -1)
|
||||
query = strings.Replace(query, "{status}", strconv.Itoa(code), -1)
|
||||
}
|
||||
|
||||
pageReq, err := newRequest(h.backendURL + query)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
w.WriteHeader(recorder.GetCode())
|
||||
fmt.Fprint(w, http.StatusText(recorder.GetCode()))
|
||||
w.WriteHeader(code)
|
||||
fmt.Fprint(w, http.StatusText(code))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -102,16 +109,11 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.
|
||||
h.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
|
||||
|
||||
utils.CopyHeaders(w.Header(), recorderErrorPage.Header())
|
||||
w.WriteHeader(recorder.GetCode())
|
||||
w.WriteHeader(code)
|
||||
w.Write(recorderErrorPage.GetBody().Bytes())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// did not catch a configured status code so proceed with the request
|
||||
utils.CopyHeaders(w.Header(), recorder.Header())
|
||||
w.WriteHeader(recorder.GetCode())
|
||||
w.Write(recorder.GetBody().Bytes())
|
||||
}
|
||||
|
||||
func newRequest(baseURL string) (*http.Request, error) {
|
||||
@@ -129,6 +131,133 @@ func newRequest(baseURL string) (*http.Request, error) {
|
||||
return req, nil
|
||||
}
|
||||
|
||||
type responseInterceptor interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
getCode() int
|
||||
isFilteredCode() bool
|
||||
}
|
||||
|
||||
// codeCatcher is a response writer that detects as soon as possible whether the
|
||||
// response is a code within the ranges of codes it watches for. If it is, it
|
||||
// simply drops the data from the response. Otherwise, it forwards it directly to
|
||||
// the original client (its responseWriter) without any buffering.
|
||||
type codeCatcher struct {
|
||||
headerMap http.Header
|
||||
code int
|
||||
httpCodeRanges types.HTTPCodeRanges
|
||||
firstWrite bool
|
||||
caughtFilteredCode bool
|
||||
responseWriter http.ResponseWriter
|
||||
headersSent bool
|
||||
err error
|
||||
}
|
||||
|
||||
type codeCatcherWithCloseNotify struct {
|
||||
*codeCatcher
|
||||
}
|
||||
|
||||
// CloseNotify returns a channel that receives at most a
|
||||
// single value (true) when the client connection has gone away.
|
||||
func (cc *codeCatcherWithCloseNotify) CloseNotify() <-chan bool {
|
||||
return cc.responseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
|
||||
func newCodeCatcher(rw http.ResponseWriter, httpCodeRanges types.HTTPCodeRanges) responseInterceptor {
|
||||
catcher := &codeCatcher{
|
||||
headerMap: make(http.Header),
|
||||
code: http.StatusOK, // If backend does not call WriteHeader on us, we consider it's a 200.
|
||||
responseWriter: rw,
|
||||
httpCodeRanges: httpCodeRanges,
|
||||
firstWrite: true,
|
||||
}
|
||||
if _, ok := rw.(http.CloseNotifier); ok {
|
||||
return &codeCatcherWithCloseNotify{catcher}
|
||||
}
|
||||
return catcher
|
||||
}
|
||||
|
||||
func (cc *codeCatcher) Header() http.Header {
|
||||
if cc.headerMap == nil {
|
||||
cc.headerMap = make(http.Header)
|
||||
}
|
||||
|
||||
return cc.headerMap
|
||||
}
|
||||
|
||||
func (cc *codeCatcher) getCode() int {
|
||||
return cc.code
|
||||
}
|
||||
|
||||
// isFilteredCode returns whether the codeCatcher received a response code among the ones it is watching,
|
||||
// and for which the response should be deferred to the error handler.
|
||||
func (cc *codeCatcher) isFilteredCode() bool {
|
||||
return cc.caughtFilteredCode
|
||||
}
|
||||
|
||||
func (cc *codeCatcher) Write(buf []byte) (int, error) {
|
||||
if !cc.firstWrite {
|
||||
if cc.caughtFilteredCode {
|
||||
// We don't care about the contents of the response,
|
||||
// since we want to serve the ones from the error page,
|
||||
// so we just drop them.
|
||||
return len(buf), nil
|
||||
}
|
||||
return cc.responseWriter.Write(buf)
|
||||
}
|
||||
cc.firstWrite = false
|
||||
|
||||
// If WriteHeader was already called from the caller, this is a NOOP.
|
||||
// Otherwise, cc.code is actually a 200 here.
|
||||
cc.WriteHeader(cc.code)
|
||||
|
||||
if cc.caughtFilteredCode {
|
||||
return len(buf), nil
|
||||
}
|
||||
return cc.responseWriter.Write(buf)
|
||||
}
|
||||
|
||||
func (cc *codeCatcher) WriteHeader(code int) {
|
||||
if cc.headersSent || cc.caughtFilteredCode {
|
||||
return
|
||||
}
|
||||
|
||||
cc.code = code
|
||||
for _, block := range cc.httpCodeRanges {
|
||||
if cc.code >= block[0] && cc.code <= block[1] {
|
||||
cc.caughtFilteredCode = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// it will be up to the other response recorder to send the headers,
|
||||
// so it is out of our hands now.
|
||||
if cc.caughtFilteredCode {
|
||||
return
|
||||
}
|
||||
utils.CopyHeaders(cc.responseWriter.Header(), cc.Header())
|
||||
cc.responseWriter.WriteHeader(cc.code)
|
||||
cc.headersSent = true
|
||||
}
|
||||
|
||||
// Hijack hijacks the connection
|
||||
func (cc *codeCatcher) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
if hj, ok := cc.responseWriter.(http.Hijacker); ok {
|
||||
return hj.Hijack()
|
||||
}
|
||||
return nil, nil, fmt.Errorf("%T is not a http.Hijacker", cc.responseWriter)
|
||||
}
|
||||
|
||||
// Flush sends any buffered data to the client.
|
||||
func (cc *codeCatcher) Flush() {
|
||||
// If WriteHeader was already called from the caller, this is a NOOP.
|
||||
// Otherwise, cc.code is actually a 200 here.
|
||||
cc.WriteHeader(cc.code)
|
||||
|
||||
if flusher, ok := cc.responseWriter.(http.Flusher); ok {
|
||||
flusher.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
type responseRecorder interface {
|
||||
http.ResponseWriter
|
||||
http.Flusher
|
||||
|
||||
@@ -34,6 +34,30 @@ func TestHandler(t *testing.T) {
|
||||
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusOK))
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "no error, but not a 200",
|
||||
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
|
||||
backendCode: http.StatusPartialContent,
|
||||
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "My error page.")
|
||||
}),
|
||||
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusPartialContent, recorder.Code, "HTTP status")
|
||||
assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusPartialContent))
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "a 304, so no Write called",
|
||||
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
|
||||
backendCode: http.StatusNotModified,
|
||||
backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
fmt.Fprintln(w, "whatever, should not be called")
|
||||
}),
|
||||
validate: func(t *testing.T, recorder *httptest.ResponseRecorder) {
|
||||
assert.Equal(t, http.StatusNotModified, recorder.Code, "HTTP status")
|
||||
assert.Contains(t, recorder.Body.String(), "")
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "in the range",
|
||||
errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}},
|
||||
@@ -108,6 +132,9 @@ func TestHandler(t *testing.T) {
|
||||
|
||||
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(test.backendCode)
|
||||
if test.backendCode == http.StatusNotModified {
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(w, http.StatusText(test.backendCode))
|
||||
})
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultRedirectRegex = `^(?:https?:\/\/)?([\w\._-]+)(?::\d+)?(.*)$`
|
||||
defaultRedirectRegex = `^(?:https?:\/\/)?(\[[\w:.]+\]|[\w\._-]+)(?::\d+)?(.*)$`
|
||||
)
|
||||
|
||||
// NewEntryPointHandler create a new redirection handler base on entry point
|
||||
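The effect of the updated regex above can be checked directly; the bracketed IPv6 host is now captured as a single submatch (a standalone snippet, not part of the patch):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The updated default redirect regex, including the bracketed IPv6 form.
	re := regexp.MustCompile(`^(?:https?:\/\/)?(\[[\w:.]+\]|[\w\._-]+)(?::\d+)?(.*)$`)

	for _, u := range []string{"http://[::1]:80/foo", "http://foo:80/bar"} {
		m := re.FindStringSubmatch(u)
		fmt.Printf("%s -> host=%s rest=%s\n", u, m[1], m[2])
	}
}
```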
|
||||
@@ -84,6 +84,34 @@ func TestNewEntryPointHandler(t *testing.T) {
|
||||
url: "http://foo:80",
|
||||
errorExpected: true,
|
||||
},
|
||||
{
|
||||
desc: "IPV6 HTTP to HTTP",
|
||||
entryPoint: &configuration.EntryPoint{Address: ":8080"},
|
||||
url: "http://[::1]",
|
||||
expectedURL: "http://[::1]:8080",
|
||||
expectedStatus: http.StatusFound,
|
||||
},
|
||||
{
|
||||
desc: "IPV6 HTTP to HTTPS",
|
||||
entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}},
|
||||
url: "http://[::1]",
|
||||
expectedURL: "https://[::1]:443",
|
||||
expectedStatus: http.StatusFound,
|
||||
},
|
||||
{
|
||||
desc: "IPV6 HTTP with port 80 to HTTP",
|
||||
entryPoint: &configuration.EntryPoint{Address: ":8080"},
|
||||
url: "http://[::1]:80",
|
||||
expectedURL: "http://[::1]:8080",
|
||||
expectedStatus: http.StatusFound,
|
||||
},
|
||||
{
|
||||
desc: "IPV6 HTTP with port 80 to HTTPS",
|
||||
entryPoint: &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}},
|
||||
url: "http://[::1]:80",
|
||||
expectedURL: "https://[::1]:443",
|
||||
expectedStatus: http.StatusFound,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/http/httptrace"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -35,7 +36,7 @@ func TestRetry(t *testing.T) {
|
||||
desc: "no retry when max request attempts is one",
|
||||
maxRequestAttempts: 1,
|
||||
wantRetryAttempts: 0,
|
||||
wantResponseStatus: http.StatusInternalServerError,
|
||||
wantResponseStatus: http.StatusBadGateway,
|
||||
amountFaultyEndpoints: 1,
|
||||
},
|
||||
{
|
||||
@@ -56,7 +57,7 @@ func TestRetry(t *testing.T) {
|
||||
desc: "max attempts exhausted delivers the 5xx response",
|
||||
maxRequestAttempts: 3,
|
||||
wantRetryAttempts: 2,
|
||||
wantResponseStatus: http.StatusInternalServerError,
|
||||
wantResponseStatus: http.StatusBadGateway,
|
||||
amountFaultyEndpoints: 3,
|
||||
},
|
||||
}
|
||||
@@ -82,17 +83,18 @@ func TestRetry(t *testing.T) {
|
||||
t.Fatalf("Error creating load balancer: %s", err)
|
||||
}
|
||||
|
||||
basePort := 33444
|
||||
// out of range port
|
||||
basePort := 1133444
|
||||
for i := 0; i < test.amountFaultyEndpoints; i++ {
|
||||
// 192.0.2.0 is a non-routable IP for testing purposes.
|
||||
// See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928
|
||||
// We only use the port specification here because the URL is used as identifier
|
||||
// in the load balancer and using the exact same URL would not add a new server.
|
||||
loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i)))
|
||||
_ = loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + strconv.Itoa(basePort+i)))
|
||||
}
|
||||
|
||||
// add the functioning server to the end of the load balancer list
|
||||
loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL))
|
||||
_ = loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL))
|
||||
|
||||
retryListener := &countingRetryListener{}
|
||||
retry := NewRetry(test.maxRequestAttempts, loadBalancer, retryListener)
|
||||
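The `string(basePort+i)` to `strconv.Itoa(basePort+i)` change fixes a classic Go pitfall: converting an integer to `string` interprets it as a Unicode code point instead of formatting the decimal digits. A quick illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	port := 33445
	// Converting an int to string yields the rune with that code point,
	// not the digits (written via rune() to keep modern compilers quiet).
	fmt.Println("http://192.0.2.0:" + string(rune(port))) // "http://192.0.2.0:" followed by one character
	fmt.Println("http://192.0.2.0:" + strconv.Itoa(port)) // "http://192.0.2.0:33445"
}
```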
@@ -154,17 +156,18 @@ func TestRetryWebsocket(t *testing.T) {
|
||||
t.Fatalf("Error creating load balancer: %s", err)
|
||||
}
|
||||
|
||||
basePort := 33444
|
||||
// out of range port
|
||||
basePort := 1133444
|
||||
for i := 0; i < test.amountFaultyEndpoints; i++ {
|
||||
// 192.0.2.0 is a non-routable IP for testing purposes.
|
||||
// See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928
|
||||
// We only use the port specification here because the URL is used as identifier
|
||||
// in the load balancer and using the exact same URL would not add a new server.
|
||||
loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i)))
|
||||
_ = loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + strconv.Itoa(basePort+i)))
|
||||
}
|
||||
|
||||
// add the functioning server to the end of the load balancer list
|
||||
loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL))
|
||||
_ = loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL))
|
||||
|
||||
retryListener := &countingRetryListener{}
|
||||
retry := NewRetry(test.maxRequestAttempts, loadBalancer, retryListener)
|
||||
|
||||
@@ -28,6 +28,7 @@ theme:
|
||||
|
||||
copyright: "Copyright © 2016-2019 Containous"
|
||||
|
||||
# only to force the use of the analytics partials
|
||||
google_analytics:
|
||||
- 'UA-51880359-3'
|
||||
- 'docs.traefik.io'
|
||||
|
||||
@@ -57,7 +57,12 @@ func (a *Account) GetPrivateKey() crypto.PrivateKey {
|
||||
return privateKey
|
||||
}
|
||||
|
||||
log.Errorf("Cannot unmarshal private key %+v", a.PrivateKey)
|
||||
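// Only log a short prefix of the key material so the full private key never
// ends up in the logs.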
keySnippet := ""
|
||||
if len(a.PrivateKey) >= 16 {
|
||||
keySnippet = string(a.PrivateKey[:16])
|
||||
}
|
||||
|
||||
log.Errorf("Cannot unmarshall private key beginning with %s", keySnippet)
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -254,7 +254,8 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
}, {
|
||||
},
|
||||
{
|
||||
desc: "Should build config with a digest auth",
|
||||
nodes: []catalogUpdate{
|
||||
{
|
||||
@@ -416,6 +417,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky + "=true",
|
||||
label.TraefikBackendLoadBalancerStickiness + "=true",
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName + "=chocolate",
|
||||
label.TraefikBackendLoadBalancerStickinessSecure + "=true",
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly + "=true",
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite + "=none",
|
||||
label.TraefikBackendMaxConnAmount + "=666",
|
||||
label.TraefikBackendMaxConnExtractorFunc + "=client.ip",
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes + "=10485760",
|
||||
@@ -700,6 +704,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -463,6 +463,9 @@ func TestDockerBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: "true",
|
||||
label.TraefikBackendLoadBalancerStickiness: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: "chocolate",
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: "none",
|
||||
label.TraefikBackendMaxConnAmount: "666",
|
||||
label.TraefikBackendMaxConnExtractorFunc: "client.ip",
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: "10485760",
|
||||
@@ -711,6 +714,9 @@ func TestDockerBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -412,6 +412,9 @@ func TestSwarmBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: "true",
|
||||
label.TraefikBackendLoadBalancerStickiness: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: "chocolate",
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: "none",
|
||||
label.TraefikBackendMaxConnAmount: "666",
|
||||
label.TraefikBackendMaxConnExtractorFunc: "client.ip",
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: "10485760",
|
||||
@@ -611,6 +614,9 @@ func TestSwarmBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -353,6 +353,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickiness: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"),
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: aws.String("none"),
|
||||
label.TraefikBackendMaxConnAmount: aws.String("666"),
|
||||
label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"),
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"),
|
||||
@@ -475,6 +478,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
@@ -662,6 +668,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickiness: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"),
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: aws.String("none"),
|
||||
label.TraefikBackendMaxConnAmount: aws.String("666"),
|
||||
label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"),
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"),
|
||||
@@ -750,6 +759,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickiness: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"),
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: aws.String("true"),
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: aws.String("none"),
|
||||
label.TraefikBackendMaxConnAmount: aws.String("666"),
|
||||
label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"),
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"),
|
||||
@@ -839,6 +851,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -309,6 +309,11 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
state: aws.StringValue(task.LastStatus),
}
} else {
if containerInstance == nil {
log.Errorf("Unable to find container instance information for %s", aws.StringValue(container.Name))
continue
}

var ports []portMapping
for _, mapping := range container.NetworkBindings {
if mapping != nil {
@@ -30,6 +30,9 @@ const (
annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method"
annotationKubernetesAffinity = "ingress.kubernetes.io/affinity"
annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name"
annotationKubernetesSessionSecure = "ingress.kubernetes.io/session-secure"
annotationKubernetesSessionHTTPOnly = "ingress.kubernetes.io/session-http-only"
annotationKubernetesSessionSameSite = "ingress.kubernetes.io/session-same-site"
annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type"
annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point"
annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent"
@@ -161,6 +161,23 @@ spec:
          servicePort: 80
        path: /api

---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    ingress.kubernetes.io/rewrite-target: /app
  namespace: testing
spec:
  rules:
  - host: rewritetargetrootpath
    http:
      paths:
      - backend:
          serviceName: service1
          servicePort: 80
        path: /

---
apiVersion: extensions/v1beta1
kind: Ingress
@@ -16,6 +16,7 @@ import (
"time"

"github.com/cenk/backoff"
"github.com/containous/flaeg"
"github.com/containous/traefik/job"
"github.com/containous/traefik/log"
"github.com/containous/traefik/provider"

@@ -68,6 +69,7 @@ type Provider struct {
LabelSelector string `description:"Kubernetes Ingress label selector to use" export:"true"`
IngressClass string `description:"Value of kubernetes.io/ingress.class annotation to watch for" export:"true"`
IngressEndpoint *IngressEndpoint `description:"Kubernetes Ingress Endpoint"`
ThrottleDuration flaeg.Duration `description:"Ingress refresh throttle duration"`
lastConfiguration safe.Safe
}
@@ -137,16 +139,29 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
return nil
}
}

throttleDuration := time.Duration(p.ThrottleDuration)
throttledChan := throttleEvents(throttleDuration, stop, eventsChan)
if throttledChan != nil {
eventsChan = throttledChan
}

for {
select {
case <-stop:
return nil
case event := <-eventsChan:
// Note that event is the *first* event that came in during this
// throttling interval -- if we're hitting our throttle, we may have
// dropped events. This is fine, because we don't treat different
// event types differently. But if we do in the future, we'll need to
// track more information about the dropped events.
log.Debugf("Received Kubernetes event kind %T", event)
templateObjects, err := p.loadIngresses(k8sClient)
if err != nil {
return err
}

if reflect.DeepEqual(p.lastConfiguration.Get(), templateObjects) {
log.Debugf("Skipping Kubernetes event kind %T", event)
} else {

@@ -156,6 +171,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
Configuration: p.loadConfig(*templateObjects),
}
}

// If we're throttling, we sleep here for the throttle duration to
// enforce that we don't refresh faster than our throttle. time.Sleep
// returns immediately if p.ThrottleDuration is 0 (no throttle).
time.Sleep(throttleDuration)
}
}
}
@@ -599,6 +619,39 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem
return nil
}

func throttleEvents(throttleDuration time.Duration, stop chan bool, eventsChan <-chan interface{}) chan interface{} {
if throttleDuration == 0 {
return nil
}
// Create a buffered channel to hold the pending event (if we're delaying processing the event due to throttling)
eventsChanBuffered := make(chan interface{}, 1)

// Run a goroutine that reads events from eventChan and does a
// non-blocking write to pendingEvent. This guarantees that writing to
// eventChan will never block, and that pendingEvent will have
// something in it if there's been an event since we read from that channel.
go func() {
for {
select {
case <-stop:
return
case nextEvent := <-eventsChan:
select {
case eventsChanBuffered <- nextEvent:
default:
// We already have an event in eventsChanBuffered, so we'll
// do a refresh as soon as our throttle allows us to. It's fine
// to drop the event and keep whatever's in the buffer -- we
// don't do different things for different events
log.Debugf("Dropping event kind %T due to throttling", nextEvent)
}
}
}
}()

return eventsChanBuffered
}
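The core of throttleEvents is a buffer of size one used as a coalescing slot: a burst of Kubernetes events collapses into a single pending event, so the provider never refreshes more than one step behind. A standalone sketch of the same idea, outside Traefik (coalesce and the int events are illustrative, not Traefik APIs):

package main

import (
	"fmt"
	"time"
)

// coalesce forwards events through a size-one buffer. While an event is
// pending, later events in a burst are dropped, which is exactly the
// behaviour throttleEvents above relies on.
func coalesce(in <-chan int) <-chan int {
	out := make(chan int, 1)
	go func() {
		for ev := range in {
			select {
			case out <- ev:
			default:
				// buffer already holds a pending event; drop this one
			}
		}
		close(out)
	}()
	return out
}

func main() {
	in := make(chan int)
	out := coalesce(in)

	go func() {
		for i := 0; i < 5; i++ { // burst of 5 events
			in <- i
		}
		close(in)
	}()

	time.Sleep(100 * time.Millisecond) // slow consumer: the burst collapses
	for ev := range out {
		fmt.Println("processing event", ev) // typically only the first event survives
	}
}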
func getRuleForPath(pa extensionsv1beta1.HTTPIngressPath, i *extensionsv1beta1.Ingress) (string, error) {
if len(pa.Path) == 0 {
return "", nil

@@ -621,6 +674,12 @@ func getRuleForPath(pa extensionsv1beta1.HTTPIngressPath, i *extensionsv1beta1.I
return "", fmt.Errorf("rewrite-target must not be used together with annotation %q", annotationKubernetesRuleType)
}
rewriteTargetRule := fmt.Sprintf("ReplacePathRegex: ^%s(.*) %s$1", pa.Path, strings.TrimRight(rewriteTarget, "/"))
if pa.Path == "/" {
// If path = /, then just append the cap group, as if we don't cap the path as part of the regex,
// then when we strip the right / from the rewrite target, it ends up being missed, as removed but never returned
// this only happens when path = / because it is the only case where TrimRight will catch a leading /.
rewriteTargetRule = fmt.Sprintf("ReplacePathRegex: ^(.*) %s$1", strings.TrimRight(rewriteTarget, "/"))
}
rules = append(rules, rewriteTargetRule)
}
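The special case for path "/" exists because the generic pattern "^/(.*)" leaves the leading slash out of the capture group. A small sketch of the regex behaviour only (the request path "/foo" and rewrite target "/app" are illustrative values matching the fixture above, not Traefik internals):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	requestPath := "/foo"

	// Generic form for path "/" would be "^/(.*)": the slash is consumed but
	// not captured, so the rewrite yields "/appfoo".
	generic := regexp.MustCompile(`^/(.*)`)
	fmt.Println(generic.ReplaceAllString(requestPath, "/app$1")) // /appfoo

	// Special-cased form "^(.*)" keeps the whole path in the capture group,
	// so the rewrite yields "/app/foo".
	rootSafe := regexp.MustCompile(`^(.*)`)
	fmt.Println(rootSafe.ReplaceAllString(requestPath, "/app$1")) // /app/foo
}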
@@ -940,12 +999,12 @@ func getForwardAuthConfig(i *extensionsv1beta1.Ingress, k8sClient Client) (*type
}

authSecretName := getStringValue(i.Annotations, annotationKubernetesAuthForwardTLSSecret, "")
if len(authSecretName) > 0 {
authSecretCert, authSecretKey, err := loadAuthTLSSecret(i.Namespace, authSecretName, k8sClient)
if err != nil {
return nil, fmt.Errorf("failed to load auth secret: %s", err)
}
authSecretCert, authSecretKey, err := loadAuthTLSSecret(i.Namespace, authSecretName, k8sClient)
if err != nil {
return nil, fmt.Errorf("failed to load auth secret: %s", err)
}

if authSecretCert != "" || authSecretKey != "" {
forwardAuth.TLS = &types.ClientTLS{
Cert: authSecretCert,
Key: authSecretKey,

@@ -953,10 +1012,20 @@ func getForwardAuthConfig(i *extensionsv1beta1.Ingress, k8sClient Client) (*type
}
}

if forwardAuth.TLS == nil && label.Has(i.Annotations, getAnnotationName(i.Annotations, annotationKubernetesAuthForwardTLSInsecure)) {
forwardAuth.TLS = &types.ClientTLS{
InsecureSkipVerify: getBoolValue(i.Annotations, annotationKubernetesAuthForwardTLSInsecure, false),
}
}

return forwardAuth, nil
}

func loadAuthTLSSecret(namespace, secretName string, k8sClient Client) (string, string, error) {
if len(secretName) == 0 {
return "", "", nil
}

secret, exists, err := k8sClient.GetSecret(namespace, secretName)
if err != nil {
return "", "", fmt.Errorf("failed to fetch secret %q/%q: %s", namespace, secretName, err)
@@ -1082,14 +1151,16 @@ func getLoadBalancer(service *corev1.Service) *types.LoadBalancer {
}

func getStickiness(service *corev1.Service) *types.Stickiness {
if getBoolValue(service.Annotations, annotationKubernetesAffinity, false) {
stickiness := &types.Stickiness{}
if cookieName := getStringValue(service.Annotations, annotationKubernetesSessionCookieName, ""); len(cookieName) > 0 {
stickiness.CookieName = cookieName
}
return stickiness
if !getBoolValue(service.Annotations, annotationKubernetesAffinity, false) {
return nil
}

return &types.Stickiness{
CookieName: getStringValue(service.Annotations, annotationKubernetesSessionCookieName, ""),
Secure: getBoolValue(service.Annotations, annotationKubernetesSessionSecure, false),
HTTPOnly: getBoolValue(service.Annotations, annotationKubernetesSessionHTTPOnly, false),
SameSite: getStringValue(service.Annotations, annotationKubernetesSessionSameSite, ""),
}
return nil
}

func getHeader(i *extensionsv1beta1.Ingress) *types.Headers {
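The reworked getStickiness reads the new session-* annotations straight into the stickiness settings. A self-contained sketch of that mapping, using local stand-ins for Traefik's types.Stickiness and helper functions (the annotation keys are the ones declared earlier in this diff; the simplified bool parsing is an assumption of this sketch):

package main

import "fmt"

type stickiness struct {
	CookieName string
	Secure     bool
	HTTPOnly   bool
	SameSite   string
}

// fromAnnotations mirrors the shape of getStickiness: no affinity, no stickiness;
// otherwise every session-* annotation feeds one field.
func fromAnnotations(a map[string]string) *stickiness {
	if a["ingress.kubernetes.io/affinity"] != "true" {
		return nil
	}
	return &stickiness{
		CookieName: a["ingress.kubernetes.io/session-cookie-name"],
		Secure:     a["ingress.kubernetes.io/session-secure"] == "true",
		HTTPOnly:   a["ingress.kubernetes.io/session-http-only"] == "true",
		SameSite:   a["ingress.kubernetes.io/session-same-site"],
	}
}

func main() {
	annotations := map[string]string{
		"ingress.kubernetes.io/affinity":            "true",
		"ingress.kubernetes.io/session-cookie-name": "chocolate",
		"ingress.kubernetes.io/session-secure":      "true",
		"ingress.kubernetes.io/session-http-only":   "true",
		"ingress.kubernetes.io/session-same-site":   "none",
	}
	fmt.Printf("%+v\n", fromAnnotations(annotations))
}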
@@ -415,6 +415,12 @@ func TestProvider_loadIngresses(t *testing.T) {
|
||||
server("http://example.com", weight(1))),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("rewritetargetrootpath/",
|
||||
servers(
|
||||
server("http://example.com", weight(1)),
|
||||
server("http://example.com", weight(1))),
|
||||
lbMethod("wrr"),
|
||||
),
|
||||
backend("error-pages/errorpages",
|
||||
servers(
|
||||
server("http://example.com", weight(1)),
|
||||
@@ -523,6 +529,12 @@ func TestProvider_loadIngresses(t *testing.T) {
|
||||
route("/whitelist-source-range", "PathPrefix:/whitelist-source-range"),
|
||||
route("test", "Host:test")),
|
||||
),
|
||||
frontend("rewritetargetrootpath/",
|
||||
passHostHeader(),
|
||||
routes(
|
||||
route("/", "PathPrefix:/;ReplacePathRegex: ^(.*) /app$1"),
|
||||
route("rewritetargetrootpath", "Host:rewritetargetrootpath")),
|
||||
),
|
||||
frontend("rewrite/api",
|
||||
passHostHeader(),
|
||||
routes(
|
||||
@@ -596,7 +608,7 @@ func TestProvider_loadIngresses(t *testing.T) {
|
||||
passHostHeader(),
|
||||
redirectRegex("root2/$", "root2/root2"),
|
||||
routes(
|
||||
route("/", "PathPrefix:/;ReplacePathRegex: ^/(.*) /abc$1"),
|
||||
route("/", "PathPrefix:/;ReplacePathRegex: ^(.*) /abc$1"),
|
||||
route("root2", "Host:root2"),
|
||||
),
|
||||
),
|
||||
|
||||
@@ -3,6 +3,8 @@ package kubernetes
import (
"strconv"
"strings"

"github.com/shopspring/decimal"
)

const defaultPercentageValuePrecision = 3

@@ -43,5 +45,6 @@ func newPercentageValueFromString(rawValue string) (percentageValue, error) {

// newPercentageValueFromFloat64 reads percentage value from float64
func newPercentageValueFromFloat64(f float64) percentageValue {
return percentageValue(f * (1000 * 100))
value := decimal.NewFromFloat(f).Mul(decimal.NewFromFloat(1000 * 100))
return percentageValue(value.IntPart())
}
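The switch to shopspring/decimal avoids a floating-point truncation problem: converting f * 100000 directly to an integer truncates, and for fractions like 2.3% the binary float product can land just below the intended integer. Going through decimal keeps the product exact before taking the integer part. A small comparison sketch (the exact float output can vary, which is the point of the fix):

package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	// 2.3% expressed as a float64 fraction, as newPercentageValueFromString would produce.
	f := 0.023

	// Direct float multiplication then truncation: the product may sit a hair
	// below 2300 in binary floating point, in which case the cast loses a unit.
	fmt.Println(int64(f * (1000 * 100)))

	// decimal multiplies exactly, so IntPart reliably gives 2300.
	fmt.Println(decimal.NewFromFloat(f).Mul(decimal.NewFromFloat(1000 * 100)).IntPart())
}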
@@ -74,46 +74,54 @@ func TestNewPercentageValueFromString(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
value: "1%",
|
||||
expectError: false,
|
||||
expectedString: "1.000%",
|
||||
expectedFloat64: 0.01,
|
||||
},
|
||||
{
|
||||
value: "0.5",
|
||||
expectError: false,
|
||||
expectedString: "0.500%",
|
||||
expectedFloat64: 0.005,
|
||||
},
|
||||
{
|
||||
value: "99%",
|
||||
expectError: false,
|
||||
expectedString: "99.000%",
|
||||
expectedFloat64: 0.99,
|
||||
},
|
||||
{
|
||||
value: "99.9%",
|
||||
expectError: false,
|
||||
expectedString: "99.900%",
|
||||
expectedFloat64: 0.999,
|
||||
},
|
||||
{
|
||||
value: "-99.9%",
|
||||
expectError: false,
|
||||
expectedString: "-99.900%",
|
||||
expectedFloat64: -0.999,
|
||||
},
|
||||
{
|
||||
value: "-99.99999%",
|
||||
expectError: false,
|
||||
expectedString: "-99.999%",
|
||||
expectedFloat64: -0.99999,
|
||||
},
|
||||
{
|
||||
value: "0%",
|
||||
expectError: false,
|
||||
expectedString: "0.000%",
|
||||
expectedFloat64: 0,
|
||||
},
|
||||
{
|
||||
value: "2.3%",
|
||||
expectedString: "2.300%",
|
||||
expectedFloat64: 0.023,
|
||||
},
|
||||
{
|
||||
value: "5.1%",
|
||||
expectedString: "5.100%",
|
||||
expectedFloat64: 0.051,
|
||||
},
|
||||
{
|
||||
value: "83.85%",
|
||||
expectedString: "83.850%",
|
||||
expectedFloat64: 0.83850,
|
||||
},
|
||||
{
|
||||
value: "%",
|
||||
expectError: true,
|
||||
|
||||
@@ -14,6 +14,9 @@ const (
pathBackendLoadBalancerSticky = "/loadbalancer/sticky"
pathBackendLoadBalancerStickiness = "/loadbalancer/stickiness"
pathBackendLoadBalancerStickinessCookieName = "/loadbalancer/stickiness/cookiename"
pathBackendLoadBalancerStickinessSecure = "/loadbalancer/stickiness/secure"
pathBackendLoadBalancerStickinessHTTPOnly = "/loadbalancer/stickiness/httponly"
pathBackendLoadBalancerStickinessSameSite = "/loadbalancer/stickiness/samesite"
pathBackendMaxConnAmount = "/maxconn/amount"
pathBackendMaxConnExtractorFunc = "/maxconn/extractorfunc"
pathBackendServers = "/servers/"

@@ -276,6 +276,9 @@ func (p *Provider) getLoadBalancer(rootPath string) *types.LoadBalancer {
if p.getBool(false, rootPath, pathBackendLoadBalancerStickiness) {
lb.Stickiness = &types.Stickiness{
CookieName: p.get("", rootPath, pathBackendLoadBalancerStickinessCookieName),
Secure: p.getBool(false, rootPath, pathBackendLoadBalancerStickinessSecure),
HTTPOnly: p.getBool(false, rootPath, pathBackendLoadBalancerStickinessHTTPOnly),
SameSite: p.get("", rootPath, pathBackendLoadBalancerStickinessSameSite),
}
}
@@ -260,6 +260,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
withPair(pathBackendLoadBalancerSticky, "true"),
|
||||
withPair(pathBackendLoadBalancerStickiness, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessCookieName, "tomate"),
|
||||
withPair(pathBackendLoadBalancerStickinessSecure, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessSameSite, "none"),
|
||||
withPair(pathBackendHealthCheckScheme, "http"),
|
||||
withPair(pathBackendHealthCheckPath, "/health"),
|
||||
withPair(pathBackendHealthCheckPort, "80"),
|
||||
@@ -389,6 +392,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "tomate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
@@ -1949,12 +1955,18 @@ func TestProviderGetLoadBalancer(t *testing.T) {
|
||||
withPair(pathBackendLoadBalancerMethod, "drr"),
|
||||
withPair(pathBackendLoadBalancerSticky, "true"),
|
||||
withPair(pathBackendLoadBalancerStickiness, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"))),
|
||||
withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"),
|
||||
withPair(pathBackendLoadBalancerStickinessSecure, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withPair(pathBackendLoadBalancerStickinessSameSite, "none"))),
|
||||
expected: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "aubergine",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -26,6 +26,9 @@ const (
DefaultBackendLoadBalancerMethod = "wrr"
DefaultBackendMaxconnExtractorFunc = "request.host"
DefaultBackendLoadbalancerStickinessCookieName = ""
DefaultBackendLoadbalancerStickinessSecure = false
DefaultBackendLoadbalancerStickinessHTTPOnly = false
DefaultBackendLoadbalancerStickinessSameSite = ""
DefaultBackendHealthCheckPort = 0
)
@@ -45,6 +48,21 @@ func GetStringValue(labels map[string]string, labelName string, defaultValue str
return defaultValue
}

// GetStringSafeValue get string value associated to a label and check if the content is "quotable".
func GetStringSafeValue(labels map[string]string, labelName string, defaultValue string) (string, error) {
value, ok := labels[labelName]
if !ok || len(value) <= 0 {
return defaultValue, nil
}

_, err := strconv.Unquote(`"` + value + `"`)
if err != nil {
return value, err
}

return value, nil
}

// GetBoolValue get bool value associated to a label
func GetBoolValue(labels map[string]string, labelName string, defaultValue bool) bool {
rawValue, ok := labels[labelName]
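GetStringSafeValue wraps the label value in quotes and runs it through strconv.Unquote, which is a cheap way to detect backslash sequences that are not valid escapes; GetRedirect, later in this diff, uses it to reject such regex label values. A quick standalone check against the two sample values from the tests added in this change set:

package main

import (
	"fmt"
	"strconv"
)

func check(value string) {
	_, err := strconv.Unquote(`"` + value + `"`)
	fmt.Printf("quotable=%-5v %s\n", err == nil, value)
}

func main() {
	// `\.` is not a valid escape inside a quoted Go string, so this one is rejected.
	check(`^https?://(test\.beta\.redacted\.org|redacted\.com)/(.*)`)
	// `\\` is a valid escape, so the double-escaped form passes.
	check(`^https?://(test\\.beta\\.redacted\\.org|redacted\\.com)/(.*)`)
}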
@@ -5,6 +5,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSplitAndTrimString(t *testing.T) {
|
||||
@@ -106,6 +107,78 @@ func TestGetStringValue(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetStringSafeValue(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
labels map[string]string
|
||||
labelName string
|
||||
defaultValue string
|
||||
expected string
|
||||
expectedError bool
|
||||
}{
|
||||
{
|
||||
desc: "empty labels map",
|
||||
labelName: "foo",
|
||||
defaultValue: "default",
|
||||
expected: "default",
|
||||
},
|
||||
{
|
||||
desc: "unescaped value",
|
||||
labels: map[string]string{
|
||||
"foo": `^https?://(test\.beta\.redacted\.org|redacted\.com)/(.*)`,
|
||||
},
|
||||
labelName: "foo",
|
||||
defaultValue: "default",
|
||||
expected: `^https?://(test\.beta\.redacted\.org|redacted\.com)/(.*)`,
|
||||
expectedError: true,
|
||||
},
|
||||
{
|
||||
desc: "escaped value",
|
||||
labels: map[string]string{
|
||||
"foo": `^https?://(test\\.beta\\.redacted\\.org|redacted\\.com)/(.*)`,
|
||||
},
|
||||
labelName: "foo",
|
||||
defaultValue: "default",
|
||||
expected: `^https?://(test\\.beta\\.redacted\\.org|redacted\\.com)/(.*)`,
|
||||
},
|
||||
{
|
||||
desc: "empty value",
|
||||
labels: map[string]string{
|
||||
"foo": "",
|
||||
},
|
||||
labelName: "foo",
|
||||
defaultValue: "default",
|
||||
expected: "default",
|
||||
},
|
||||
{
|
||||
desc: "non existing label",
|
||||
labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
labelName: "fii",
|
||||
defaultValue: "default",
|
||||
expected: "default",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
got, err := GetStringSafeValue(test.labels, test.labelName, test.defaultValue)
|
||||
|
||||
if test.expectedError {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
assert.Equal(t, test.expected, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetBoolValue(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
|
||||
@@ -2,142 +2,150 @@ package label
|
||||
|
||||
// Traefik labels
|
||||
const (
|
||||
Prefix = "traefik."
|
||||
SuffixBackend = "backend"
|
||||
SuffixDomain = "domain"
|
||||
SuffixEnable = "enable"
|
||||
SuffixPort = "port"
|
||||
SuffixPortName = "portName"
|
||||
SuffixPortIndex = "portIndex"
|
||||
SuffixProtocol = "protocol"
|
||||
SuffixTags = "tags"
|
||||
SuffixWeight = "weight"
|
||||
SuffixBackendID = "backend.id"
|
||||
SuffixBackendCircuitBreaker = "backend.circuitbreaker"
|
||||
SuffixBackendCircuitBreakerExpression = "backend.circuitbreaker.expression"
|
||||
SuffixBackendHealthCheckScheme = "backend.healthcheck.scheme"
|
||||
SuffixBackendHealthCheckPath = "backend.healthcheck.path"
|
||||
SuffixBackendHealthCheckPort = "backend.healthcheck.port"
|
||||
SuffixBackendHealthCheckInterval = "backend.healthcheck.interval"
|
||||
SuffixBackendHealthCheckHostname = "backend.healthcheck.hostname"
|
||||
SuffixBackendHealthCheckHeaders = "backend.healthcheck.headers"
|
||||
SuffixBackendLoadBalancer = "backend.loadbalancer"
|
||||
SuffixBackendLoadBalancerMethod = SuffixBackendLoadBalancer + ".method"
|
||||
SuffixBackendLoadBalancerSticky = SuffixBackendLoadBalancer + ".sticky"
|
||||
SuffixBackendLoadBalancerStickiness = SuffixBackendLoadBalancer + ".stickiness"
|
||||
SuffixBackendLoadBalancerStickinessCookieName = SuffixBackendLoadBalancer + ".stickiness.cookieName"
|
||||
SuffixBackendMaxConnAmount = "backend.maxconn.amount"
|
||||
SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc"
|
||||
SuffixBackendBuffering = "backend.buffering"
|
||||
SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval"
|
||||
SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes"
|
||||
SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes"
|
||||
SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes"
|
||||
SuffixBackendBufferingMemResponseBodyBytes = SuffixBackendBuffering + ".memResponseBodyBytes"
|
||||
SuffixBackendBufferingRetryExpression = SuffixBackendBuffering + ".retryExpression"
|
||||
SuffixFrontend = "frontend"
|
||||
SuffixFrontendAuth = SuffixFrontend + ".auth"
|
||||
SuffixFrontendAuthBasic = SuffixFrontendAuth + ".basic"
|
||||
SuffixFrontendAuthBasicRemoveHeader = SuffixFrontendAuthBasic + ".removeHeader"
|
||||
SuffixFrontendAuthBasicUsers = SuffixFrontendAuthBasic + ".users"
|
||||
SuffixFrontendAuthBasicUsersFile = SuffixFrontendAuthBasic + ".usersFile"
|
||||
SuffixFrontendAuthDigest = SuffixFrontendAuth + ".digest"
|
||||
SuffixFrontendAuthDigestRemoveHeader = SuffixFrontendAuthDigest + ".removeHeader"
|
||||
SuffixFrontendAuthDigestUsers = SuffixFrontendAuthDigest + ".users"
|
||||
SuffixFrontendAuthDigestUsersFile = SuffixFrontendAuthDigest + ".usersFile"
|
||||
SuffixFrontendAuthForward = SuffixFrontendAuth + ".forward"
|
||||
SuffixFrontendAuthForwardAddress = SuffixFrontendAuthForward + ".address"
|
||||
SuffixFrontendAuthForwardAuthResponseHeaders = SuffixFrontendAuthForward + ".authResponseHeaders"
|
||||
SuffixFrontendAuthForwardTLS = SuffixFrontendAuthForward + ".tls"
|
||||
SuffixFrontendAuthForwardTLSCa = SuffixFrontendAuthForwardTLS + ".ca"
|
||||
SuffixFrontendAuthForwardTLSCaOptional = SuffixFrontendAuthForwardTLS + ".caOptional"
|
||||
SuffixFrontendAuthForwardTLSCert = SuffixFrontendAuthForwardTLS + ".cert"
|
||||
SuffixFrontendAuthForwardTLSInsecureSkipVerify = SuffixFrontendAuthForwardTLS + ".insecureSkipVerify"
|
||||
SuffixFrontendAuthForwardTLSKey = SuffixFrontendAuthForwardTLS + ".key"
|
||||
SuffixFrontendAuthForwardTrustForwardHeader = SuffixFrontendAuthForward + ".trustForwardHeader"
|
||||
SuffixFrontendAuthHeaderField = SuffixFrontendAuth + ".headerField"
|
||||
SuffixFrontendEntryPoints = "frontend.entryPoints"
|
||||
SuffixFrontendHeaders = "frontend.headers."
|
||||
SuffixFrontendRequestHeaders = SuffixFrontendHeaders + "customRequestHeaders"
|
||||
SuffixFrontendResponseHeaders = SuffixFrontendHeaders + "customResponseHeaders"
|
||||
SuffixFrontendHeadersAllowedHosts = SuffixFrontendHeaders + "allowedHosts"
|
||||
SuffixFrontendHeadersHostsProxyHeaders = SuffixFrontendHeaders + "hostsProxyHeaders"
|
||||
SuffixFrontendHeadersSSLForceHost = SuffixFrontendHeaders + "SSLForceHost"
|
||||
SuffixFrontendHeadersSSLRedirect = SuffixFrontendHeaders + "SSLRedirect"
|
||||
SuffixFrontendHeadersSSLTemporaryRedirect = SuffixFrontendHeaders + "SSLTemporaryRedirect"
|
||||
SuffixFrontendHeadersSSLHost = SuffixFrontendHeaders + "SSLHost"
|
||||
SuffixFrontendHeadersSSLProxyHeaders = SuffixFrontendHeaders + "SSLProxyHeaders"
|
||||
SuffixFrontendHeadersSTSSeconds = SuffixFrontendHeaders + "STSSeconds"
|
||||
SuffixFrontendHeadersSTSIncludeSubdomains = SuffixFrontendHeaders + "STSIncludeSubdomains"
|
||||
SuffixFrontendHeadersSTSPreload = SuffixFrontendHeaders + "STSPreload"
|
||||
SuffixFrontendHeadersForceSTSHeader = SuffixFrontendHeaders + "forceSTSHeader"
|
||||
SuffixFrontendHeadersFrameDeny = SuffixFrontendHeaders + "frameDeny"
|
||||
SuffixFrontendHeadersCustomFrameOptionsValue = SuffixFrontendHeaders + "customFrameOptionsValue"
|
||||
SuffixFrontendHeadersContentTypeNosniff = SuffixFrontendHeaders + "contentTypeNosniff"
|
||||
SuffixFrontendHeadersBrowserXSSFilter = SuffixFrontendHeaders + "browserXSSFilter"
|
||||
SuffixFrontendHeadersCustomBrowserXSSValue = SuffixFrontendHeaders + "customBrowserXSSValue"
|
||||
SuffixFrontendHeadersContentSecurityPolicy = SuffixFrontendHeaders + "contentSecurityPolicy"
|
||||
SuffixFrontendHeadersPublicKey = SuffixFrontendHeaders + "publicKey"
|
||||
SuffixFrontendHeadersReferrerPolicy = SuffixFrontendHeaders + "referrerPolicy"
|
||||
SuffixFrontendHeadersIsDevelopment = SuffixFrontendHeaders + "isDevelopment"
|
||||
SuffixFrontendPassHostHeader = "frontend.passHostHeader"
|
||||
SuffixFrontendPassTLSClientCert = "frontend.passTLSClientCert"
|
||||
SuffixFrontendPassTLSClientCertPem = SuffixFrontendPassTLSClientCert + ".pem"
|
||||
SuffixFrontendPassTLSClientCertInfos = SuffixFrontendPassTLSClientCert + ".infos"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuer = SuffixFrontendPassTLSClientCertInfos + ".issuer"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerCommonName = SuffixFrontendPassTLSClientCertInfosIssuer + ".commonName"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerCountry = SuffixFrontendPassTLSClientCertInfosIssuer + ".country"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent = SuffixFrontendPassTLSClientCertInfosIssuer + ".domainComponent"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerLocality = SuffixFrontendPassTLSClientCertInfosIssuer + ".locality"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerOrganization = SuffixFrontendPassTLSClientCertInfosIssuer + ".organization"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerProvince = SuffixFrontendPassTLSClientCertInfosIssuer + ".province"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber = SuffixFrontendPassTLSClientCertInfosIssuer + ".serialNumber"
|
||||
SuffixFrontendPassTLSClientCertInfosSubject = SuffixFrontendPassTLSClientCertInfos + ".subject"
|
||||
SuffixFrontendPassTLSClientCertInfosNotAfter = SuffixFrontendPassTLSClientCertInfos + ".notAfter"
|
||||
SuffixFrontendPassTLSClientCertInfosNotBefore = SuffixFrontendPassTLSClientCertInfos + ".notBefore"
|
||||
SuffixFrontendPassTLSClientCertInfosSans = SuffixFrontendPassTLSClientCertInfos + ".sans"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCommonName = SuffixFrontendPassTLSClientCertInfosSubject + ".commonName"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCountry = SuffixFrontendPassTLSClientCertInfosSubject + ".country"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent = SuffixFrontendPassTLSClientCertInfosSubject + ".domainComponent"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectLocality = SuffixFrontendPassTLSClientCertInfosSubject + ".locality"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectOrganization = SuffixFrontendPassTLSClientCertInfosSubject + ".organization"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectProvince = SuffixFrontendPassTLSClientCertInfosSubject + ".province"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber = SuffixFrontendPassTLSClientCertInfosSubject + ".serialNumber"
|
||||
SuffixFrontendPassTLSCert = "frontend.passTLSCert" // Deprecated
|
||||
SuffixFrontendPriority = "frontend.priority"
|
||||
SuffixFrontendRateLimitExtractorFunc = "frontend.rateLimit.extractorFunc"
|
||||
SuffixFrontendRedirectEntryPoint = "frontend.redirect.entryPoint"
|
||||
SuffixFrontendRedirectRegex = "frontend.redirect.regex"
|
||||
SuffixFrontendRedirectReplacement = "frontend.redirect.replacement"
|
||||
SuffixFrontendRedirectPermanent = "frontend.redirect.permanent"
|
||||
SuffixFrontendRule = "frontend.rule"
|
||||
SuffixFrontendWhitelistSourceRange = "frontend.whitelistSourceRange" // Deprecated
|
||||
SuffixFrontendWhiteList = "frontend.whiteList."
|
||||
SuffixFrontendWhiteListSourceRange = SuffixFrontendWhiteList + "sourceRange"
|
||||
SuffixFrontendWhiteListUseXForwardedFor = SuffixFrontendWhiteList + "useXForwardedFor"
|
||||
TraefikDomain = Prefix + SuffixDomain
|
||||
TraefikEnable = Prefix + SuffixEnable
|
||||
TraefikPort = Prefix + SuffixPort
|
||||
TraefikPortName = Prefix + SuffixPortName
|
||||
TraefikPortIndex = Prefix + SuffixPortIndex
|
||||
TraefikProtocol = Prefix + SuffixProtocol
|
||||
TraefikTags = Prefix + SuffixTags
|
||||
TraefikWeight = Prefix + SuffixWeight
|
||||
TraefikBackend = Prefix + SuffixBackend
|
||||
TraefikBackendID = Prefix + SuffixBackendID
|
||||
TraefikBackendCircuitBreaker = Prefix + SuffixBackendCircuitBreaker
|
||||
TraefikBackendCircuitBreakerExpression = Prefix + SuffixBackendCircuitBreakerExpression
|
||||
TraefikBackendHealthCheckScheme = Prefix + SuffixBackendHealthCheckScheme
|
||||
TraefikBackendHealthCheckPath = Prefix + SuffixBackendHealthCheckPath
|
||||
TraefikBackendHealthCheckPort = Prefix + SuffixBackendHealthCheckPort
|
||||
TraefikBackendHealthCheckInterval = Prefix + SuffixBackendHealthCheckInterval
|
||||
TraefikBackendHealthCheckHostname = Prefix + SuffixBackendHealthCheckHostname
|
||||
TraefikBackendHealthCheckHeaders = Prefix + SuffixBackendHealthCheckHeaders
|
||||
TraefikBackendLoadBalancer = Prefix + SuffixBackendLoadBalancer
|
||||
TraefikBackendLoadBalancerMethod = Prefix + SuffixBackendLoadBalancerMethod
|
||||
TraefikBackendLoadBalancerSticky = Prefix + SuffixBackendLoadBalancerSticky
|
||||
TraefikBackendLoadBalancerStickiness = Prefix + SuffixBackendLoadBalancerStickiness
|
||||
TraefikBackendLoadBalancerStickinessCookieName = Prefix + SuffixBackendLoadBalancerStickinessCookieName
|
||||
Prefix = "traefik."
|
||||
SuffixBackend = "backend"
|
||||
SuffixDomain = "domain"
|
||||
SuffixEnable = "enable"
|
||||
SuffixPort = "port"
|
||||
SuffixPortName = "portName"
|
||||
SuffixPortIndex = "portIndex"
|
||||
SuffixProtocol = "protocol"
|
||||
SuffixTags = "tags"
|
||||
SuffixWeight = "weight"
|
||||
SuffixBackendID = "backend.id"
|
||||
SuffixBackendCircuitBreaker = "backend.circuitbreaker"
|
||||
SuffixBackendCircuitBreakerExpression = "backend.circuitbreaker.expression"
|
||||
SuffixBackendHealthCheckScheme = "backend.healthcheck.scheme"
|
||||
SuffixBackendHealthCheckPath = "backend.healthcheck.path"
|
||||
SuffixBackendHealthCheckPort = "backend.healthcheck.port"
|
||||
SuffixBackendHealthCheckInterval = "backend.healthcheck.interval"
|
||||
SuffixBackendHealthCheckHostname = "backend.healthcheck.hostname"
|
||||
SuffixBackendHealthCheckHeaders = "backend.healthcheck.headers"
|
||||
SuffixBackendLoadBalancer = "backend.loadbalancer"
|
||||
SuffixBackendLoadBalancerMethod = SuffixBackendLoadBalancer + ".method"
|
||||
SuffixBackendLoadBalancerSticky = SuffixBackendLoadBalancer + ".sticky"
|
||||
SuffixBackendLoadBalancerStickiness = SuffixBackendLoadBalancer + ".stickiness"
|
||||
SuffixBackendLoadBalancerStickinessCookieName = SuffixBackendLoadBalancer + ".stickiness.cookieName"
|
||||
SuffixBackendLoadBalancerStickinessSecure = SuffixBackendLoadBalancer + ".stickiness.secure"
|
||||
SuffixBackendLoadBalancerStickinessCHTTPOnly = SuffixBackendLoadBalancer + ".stickiness.httpOnly"
|
||||
SuffixBackendLoadBalancerStickinessSameSite = SuffixBackendLoadBalancer + ".stickiness.sameSite"
|
||||
SuffixBackendMaxConnAmount = "backend.maxconn.amount"
|
||||
SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc"
|
||||
SuffixBackendBuffering = "backend.buffering"
|
||||
SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval"
|
||||
SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes"
|
||||
SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes"
|
||||
SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes"
|
||||
SuffixBackendBufferingMemResponseBodyBytes = SuffixBackendBuffering + ".memResponseBodyBytes"
|
||||
SuffixBackendBufferingRetryExpression = SuffixBackendBuffering + ".retryExpression"
|
||||
SuffixFrontend = "frontend"
|
||||
SuffixFrontendAuth = SuffixFrontend + ".auth"
|
||||
SuffixFrontendAuthBasic = SuffixFrontendAuth + ".basic"
|
||||
SuffixFrontendAuthBasicRemoveHeader = SuffixFrontendAuthBasic + ".removeHeader"
|
||||
SuffixFrontendAuthBasicUsers = SuffixFrontendAuthBasic + ".users"
|
||||
SuffixFrontendAuthBasicUsersFile = SuffixFrontendAuthBasic + ".usersFile"
|
||||
SuffixFrontendAuthDigest = SuffixFrontendAuth + ".digest"
|
||||
SuffixFrontendAuthDigestRemoveHeader = SuffixFrontendAuthDigest + ".removeHeader"
|
||||
SuffixFrontendAuthDigestUsers = SuffixFrontendAuthDigest + ".users"
|
||||
SuffixFrontendAuthDigestUsersFile = SuffixFrontendAuthDigest + ".usersFile"
|
||||
SuffixFrontendAuthForward = SuffixFrontendAuth + ".forward"
|
||||
SuffixFrontendAuthForwardAddress = SuffixFrontendAuthForward + ".address"
|
||||
SuffixFrontendAuthForwardAuthResponseHeaders = SuffixFrontendAuthForward + ".authResponseHeaders"
|
||||
SuffixFrontendAuthForwardTLS = SuffixFrontendAuthForward + ".tls"
|
||||
SuffixFrontendAuthForwardTLSCa = SuffixFrontendAuthForwardTLS + ".ca"
|
||||
SuffixFrontendAuthForwardTLSCaOptional = SuffixFrontendAuthForwardTLS + ".caOptional"
|
||||
SuffixFrontendAuthForwardTLSCert = SuffixFrontendAuthForwardTLS + ".cert"
|
||||
SuffixFrontendAuthForwardTLSInsecureSkipVerify = SuffixFrontendAuthForwardTLS + ".insecureSkipVerify"
|
||||
SuffixFrontendAuthForwardTLSKey = SuffixFrontendAuthForwardTLS + ".key"
|
||||
SuffixFrontendAuthForwardTrustForwardHeader = SuffixFrontendAuthForward + ".trustForwardHeader"
|
||||
SuffixFrontendAuthHeaderField = SuffixFrontendAuth + ".headerField"
|
||||
SuffixFrontendEntryPoints = "frontend.entryPoints"
|
||||
SuffixFrontendHeaders = "frontend.headers."
|
||||
SuffixFrontendRequestHeaders = SuffixFrontendHeaders + "customRequestHeaders"
|
||||
SuffixFrontendResponseHeaders = SuffixFrontendHeaders + "customResponseHeaders"
|
||||
SuffixFrontendHeadersAllowedHosts = SuffixFrontendHeaders + "allowedHosts"
|
||||
SuffixFrontendHeadersHostsProxyHeaders = SuffixFrontendHeaders + "hostsProxyHeaders"
|
||||
SuffixFrontendHeadersSSLForceHost = SuffixFrontendHeaders + "SSLForceHost"
|
||||
SuffixFrontendHeadersSSLRedirect = SuffixFrontendHeaders + "SSLRedirect"
|
||||
SuffixFrontendHeadersSSLTemporaryRedirect = SuffixFrontendHeaders + "SSLTemporaryRedirect"
|
||||
SuffixFrontendHeadersSSLHost = SuffixFrontendHeaders + "SSLHost"
|
||||
SuffixFrontendHeadersSSLProxyHeaders = SuffixFrontendHeaders + "SSLProxyHeaders"
|
||||
SuffixFrontendHeadersSTSSeconds = SuffixFrontendHeaders + "STSSeconds"
|
||||
SuffixFrontendHeadersSTSIncludeSubdomains = SuffixFrontendHeaders + "STSIncludeSubdomains"
|
||||
SuffixFrontendHeadersSTSPreload = SuffixFrontendHeaders + "STSPreload"
|
||||
SuffixFrontendHeadersForceSTSHeader = SuffixFrontendHeaders + "forceSTSHeader"
|
||||
SuffixFrontendHeadersFrameDeny = SuffixFrontendHeaders + "frameDeny"
|
||||
SuffixFrontendHeadersCustomFrameOptionsValue = SuffixFrontendHeaders + "customFrameOptionsValue"
|
||||
SuffixFrontendHeadersContentTypeNosniff = SuffixFrontendHeaders + "contentTypeNosniff"
|
||||
SuffixFrontendHeadersBrowserXSSFilter = SuffixFrontendHeaders + "browserXSSFilter"
|
||||
SuffixFrontendHeadersCustomBrowserXSSValue = SuffixFrontendHeaders + "customBrowserXSSValue"
|
||||
SuffixFrontendHeadersContentSecurityPolicy = SuffixFrontendHeaders + "contentSecurityPolicy"
|
||||
SuffixFrontendHeadersPublicKey = SuffixFrontendHeaders + "publicKey"
|
||||
SuffixFrontendHeadersReferrerPolicy = SuffixFrontendHeaders + "referrerPolicy"
|
||||
SuffixFrontendHeadersIsDevelopment = SuffixFrontendHeaders + "isDevelopment"
|
||||
SuffixFrontendPassHostHeader = "frontend.passHostHeader"
|
||||
SuffixFrontendPassTLSClientCert = "frontend.passTLSClientCert"
|
||||
SuffixFrontendPassTLSClientCertPem = SuffixFrontendPassTLSClientCert + ".pem"
|
||||
SuffixFrontendPassTLSClientCertInfos = SuffixFrontendPassTLSClientCert + ".infos"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuer = SuffixFrontendPassTLSClientCertInfos + ".issuer"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerCommonName = SuffixFrontendPassTLSClientCertInfosIssuer + ".commonName"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerCountry = SuffixFrontendPassTLSClientCertInfosIssuer + ".country"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent = SuffixFrontendPassTLSClientCertInfosIssuer + ".domainComponent"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerLocality = SuffixFrontendPassTLSClientCertInfosIssuer + ".locality"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerOrganization = SuffixFrontendPassTLSClientCertInfosIssuer + ".organization"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerProvince = SuffixFrontendPassTLSClientCertInfosIssuer + ".province"
|
||||
SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber = SuffixFrontendPassTLSClientCertInfosIssuer + ".serialNumber"
|
||||
SuffixFrontendPassTLSClientCertInfosSubject = SuffixFrontendPassTLSClientCertInfos + ".subject"
|
||||
SuffixFrontendPassTLSClientCertInfosNotAfter = SuffixFrontendPassTLSClientCertInfos + ".notAfter"
|
||||
SuffixFrontendPassTLSClientCertInfosNotBefore = SuffixFrontendPassTLSClientCertInfos + ".notBefore"
|
||||
SuffixFrontendPassTLSClientCertInfosSans = SuffixFrontendPassTLSClientCertInfos + ".sans"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCommonName = SuffixFrontendPassTLSClientCertInfosSubject + ".commonName"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectCountry = SuffixFrontendPassTLSClientCertInfosSubject + ".country"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent = SuffixFrontendPassTLSClientCertInfosSubject + ".domainComponent"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectLocality = SuffixFrontendPassTLSClientCertInfosSubject + ".locality"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectOrganization = SuffixFrontendPassTLSClientCertInfosSubject + ".organization"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectProvince = SuffixFrontendPassTLSClientCertInfosSubject + ".province"
|
||||
SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber = SuffixFrontendPassTLSClientCertInfosSubject + ".serialNumber"
|
||||
SuffixFrontendPassTLSCert = "frontend.passTLSCert" // Deprecated
|
||||
SuffixFrontendPriority = "frontend.priority"
|
||||
SuffixFrontendRateLimitExtractorFunc = "frontend.rateLimit.extractorFunc"
|
||||
SuffixFrontendRedirectEntryPoint = "frontend.redirect.entryPoint"
|
||||
SuffixFrontendRedirectRegex = "frontend.redirect.regex"
|
||||
SuffixFrontendRedirectReplacement = "frontend.redirect.replacement"
|
||||
SuffixFrontendRedirectPermanent = "frontend.redirect.permanent"
|
||||
SuffixFrontendRule = "frontend.rule"
|
||||
SuffixFrontendWhitelistSourceRange = "frontend.whitelistSourceRange" // Deprecated
|
||||
SuffixFrontendWhiteList = "frontend.whiteList."
|
||||
SuffixFrontendWhiteListSourceRange = SuffixFrontendWhiteList + "sourceRange"
|
||||
SuffixFrontendWhiteListUseXForwardedFor = SuffixFrontendWhiteList + "useXForwardedFor"
|
||||
TraefikDomain = Prefix + SuffixDomain
|
||||
TraefikEnable = Prefix + SuffixEnable
|
||||
TraefikPort = Prefix + SuffixPort
|
||||
TraefikPortName = Prefix + SuffixPortName
|
||||
TraefikPortIndex = Prefix + SuffixPortIndex
|
||||
TraefikProtocol = Prefix + SuffixProtocol
|
||||
TraefikTags = Prefix + SuffixTags
|
||||
TraefikWeight = Prefix + SuffixWeight
|
||||
TraefikBackend = Prefix + SuffixBackend
|
||||
TraefikBackendID = Prefix + SuffixBackendID
|
||||
TraefikBackendCircuitBreaker = Prefix + SuffixBackendCircuitBreaker
|
||||
TraefikBackendCircuitBreakerExpression = Prefix + SuffixBackendCircuitBreakerExpression
|
||||
TraefikBackendHealthCheckScheme = Prefix + SuffixBackendHealthCheckScheme
|
||||
TraefikBackendHealthCheckPath = Prefix + SuffixBackendHealthCheckPath
|
||||
TraefikBackendHealthCheckPort = Prefix + SuffixBackendHealthCheckPort
|
||||
TraefikBackendHealthCheckInterval = Prefix + SuffixBackendHealthCheckInterval
|
||||
TraefikBackendHealthCheckHostname = Prefix + SuffixBackendHealthCheckHostname
|
||||
TraefikBackendHealthCheckHeaders = Prefix + SuffixBackendHealthCheckHeaders
|
||||
TraefikBackendLoadBalancer = Prefix + SuffixBackendLoadBalancer
|
||||
TraefikBackendLoadBalancerMethod = Prefix + SuffixBackendLoadBalancerMethod
|
||||
TraefikBackendLoadBalancerSticky = Prefix + SuffixBackendLoadBalancerSticky
|
||||
TraefikBackendLoadBalancerStickiness = Prefix + SuffixBackendLoadBalancerStickiness
|
||||
TraefikBackendLoadBalancerStickinessCookieName = Prefix + SuffixBackendLoadBalancerStickinessCookieName
|
||||
// FIXME
|
||||
TraefikBackendLoadBalancerStickinessSecure = Prefix + SuffixBackendLoadBalancerStickinessSecure
|
||||
TraefikBackendLoadBalancerStickinessHTTPOnly = Prefix + SuffixBackendLoadBalancerStickinessCHTTPOnly
|
||||
TraefikBackendLoadBalancerStickinessSameSite = Prefix + SuffixBackendLoadBalancerStickinessSameSite
|
||||
|
||||
TraefikBackendMaxConnAmount = Prefix + SuffixBackendMaxConnAmount
|
||||
TraefikBackendMaxConnExtractorFunc = Prefix + SuffixBackendMaxConnExtractorFunc
|
||||
TraefikBackendBuffering = Prefix + SuffixBackendBuffering
|
||||
|
||||
@@ -50,8 +50,13 @@ func GetRedirect(labels map[string]string) *types.Redirect {

if Has(labels, TraefikFrontendRedirectRegex) &&
Has(labels, TraefikFrontendRedirectReplacement) {
value, err := GetStringSafeValue(labels, TraefikFrontendRedirectRegex, "")
if err != nil {
log.Errorf("Invalid regex syntax: %s", value)
return nil
}
return &types.Redirect{
Regex: GetStringValue(labels, TraefikFrontendRedirectRegex, ""),
Regex: value,
Replacement: GetStringValue(labels, TraefikFrontendRedirectReplacement, ""),
Permanent: permanent,
}

@@ -421,8 +426,12 @@ func GetLoadBalancer(labels map[string]string) *types.LoadBalancer {
}

if GetBoolValue(labels, TraefikBackendLoadBalancerStickiness, false) {
cookieName := GetStringValue(labels, TraefikBackendLoadBalancerStickinessCookieName, DefaultBackendLoadbalancerStickinessCookieName)
lb.Stickiness = &types.Stickiness{CookieName: cookieName}
lb.Stickiness = &types.Stickiness{
CookieName: GetStringValue(labels, TraefikBackendLoadBalancerStickinessCookieName, DefaultBackendLoadbalancerStickinessCookieName),
Secure: GetBoolValue(labels, TraefikBackendLoadBalancerStickinessSecure, DefaultBackendLoadbalancerStickinessSecure),
HTTPOnly: GetBoolValue(labels, TraefikBackendLoadBalancerStickinessHTTPOnly, DefaultBackendLoadbalancerStickinessHTTPOnly),
SameSite: GetStringValue(labels, TraefikBackendLoadBalancerStickinessSameSite, DefaultBackendLoadbalancerStickinessSameSite),
}
}

return lb
@@ -257,12 +257,18 @@ func TestGetLoadBalancer(t *testing.T) {
|
||||
TraefikBackendLoadBalancerSticky: "true",
|
||||
TraefikBackendLoadBalancerStickiness: "true",
|
||||
TraefikBackendLoadBalancerStickinessCookieName: "foo",
|
||||
TraefikBackendLoadBalancerStickinessSecure: "true",
|
||||
TraefikBackendLoadBalancerStickinessHTTPOnly: "true",
|
||||
TraefikBackendLoadBalancerStickinessSameSite: "none",
|
||||
},
|
||||
expected: &types.LoadBalancer{
|
||||
Method: "drr",
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "foo",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -449,12 +455,19 @@ func TestGetRedirect(t *testing.T) {
|
||||
labels map[string]string
|
||||
expected *types.Redirect
|
||||
}{
|
||||
|
||||
{
|
||||
desc: "should return nil when no redirect labels",
|
||||
labels: map[string]string{},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should return nil when regex syntax is invalid",
|
||||
labels: map[string]string{
|
||||
TraefikFrontendRedirectRegex: `^https?://(test\.beta\.redacted\.org|redacted\.com)/(.*)`,
|
||||
TraefikFrontendRedirectReplacement: "$1",
|
||||
},
|
||||
expected: nil,
|
||||
},
|
||||
{
|
||||
desc: "should use only entry point tag when mix regex redirect and entry point redirect",
|
||||
labels: map[string]string{
|
||||
@@ -805,6 +818,7 @@ func TestGetAuth(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPassTLSClientCert(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
|
||||
@@ -101,6 +101,9 @@ func TestBuildConfigurationSegments(t *testing.T) {
|
||||
withLabel(label.TraefikBackendLoadBalancerSticky, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSecure, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSameSite, "none"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
@@ -347,6 +350,9 @@ func TestBuildConfigurationSegments(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -369,6 +369,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
withLabel(label.TraefikBackendLoadBalancerSticky, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSecure, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSameSite, "none"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
@@ -613,6 +616,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -119,6 +119,9 @@ func TestBuildConfigurationSegments(t *testing.T) {
|
||||
withLabel(label.TraefikBackendLoadBalancerSticky, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSecure, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSameSite, "none"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
@@ -368,6 +371,9 @@ func TestBuildConfigurationSegments(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -325,6 +325,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
withLabel(label.TraefikBackendLoadBalancerMethod, "drr"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickiness, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSecure, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessHTTPOnly, "true"),
|
||||
withLabel(label.TraefikBackendLoadBalancerStickinessSameSite, "none"),
|
||||
withLabel(label.TraefikBackendMaxConnAmount, "666"),
|
||||
withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"),
|
||||
withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"),
|
||||
@@ -572,6 +575,9 @@ func TestBuildConfiguration(t *testing.T) {
|
||||
Method: "drr",
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -52,6 +52,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
label.TraefikBackendLoadBalancerSticky: "true",
|
||||
label.TraefikBackendLoadBalancerStickiness: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessCookieName: "chocolate",
|
||||
label.TraefikBackendLoadBalancerStickinessSecure: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessHTTPOnly: "true",
|
||||
label.TraefikBackendLoadBalancerStickinessSameSite: "none",
|
||||
label.TraefikBackendMaxConnAmount: "666",
|
||||
label.TraefikBackendMaxConnExtractorFunc: "client.ip",
|
||||
label.TraefikBackendBufferingMaxResponseBodyBytes: "10485760",
|
||||
@@ -304,6 +307,9 @@ func TestProviderBuildConfiguration(t *testing.T) {
|
||||
Sticky: true,
|
||||
Stickiness: &types.Stickiness{
|
||||
CookieName: "chocolate",
|
||||
Secure: true,
|
||||
HTTPOnly: true,
|
||||
SameSite: "none",
|
||||
},
|
||||
},
|
||||
MaxConn: &types.MaxConn{
|
||||
|
||||
@@ -5,7 +5,7 @@ if [ -z "${VALIDATE_UPSTREAM:-}" ]; then
# are running more than one validate bundlescript

VALIDATE_REPO='https://github.com/containous/traefik.git'
VALIDATE_BRANCH='master'
VALIDATE_BRANCH='v1.7'

# Should not be needed for now O:)
# if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
@@ -3,9 +3,8 @@ set -e

# List of bundles to create when no argument is passed
DEFAULT_BUNDLES=(
validate-gofmt
validate-govet
generate
validate-gofmt
binary

test-unit
@@ -1,28 +0,0 @@
#!/usr/bin/env bash

source "$(dirname "$BASH_SOURCE")/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
unset IFS

errors=()
failedErrcheck=$(errcheck .)
if [ "$failedErrcheck" ]; then
errors+=( "$failedErrcheck" )
fi

if [ ${#errors[@]} -eq 0 ]; then
echo 'Congratulations! All Go source files have been errchecked.'
else
{
echo "Errors from errcheck:"
for err in "${errors[@]}"; do
echo "$err"
done
echo
echo 'Please fix the above errors. You can test via "errcheck" and commit the result.'
echo
} >&2
false
fi
@@ -1,31 +0,0 @@
#!/usr/bin/env bash

source "$(dirname "$BASH_SOURCE")/.validate"

IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) )
unset IFS

errors=()
for f in "${files[@]}"; do
# we use "git show" here to validate that what's committed passes go vet
failedVet=$(go tool vet -printf=false "$f")
if [ "$failedVet" ]; then
errors+=( "$failedVet" )
fi
done

if [ ${#errors[@]} -eq 0 ]; then
echo 'Congratulations! All Go source files have been vetted.'
else
{
echo "Errors from govet:"
for err in "${errors[@]}"; do
echo "$err"
done
echo
echo 'Please fix the above errors. You can test via "go vet" and commit the result.'
echo
} >&2
false
fi
@@ -85,6 +85,8 @@ func (h *hijackConnectionTracker) Shutdown(ctx context.Context) error {

// Close close all the connections in the tracked connections list
func (h *hijackConnectionTracker) Close() {
h.lock.Lock()
defer h.lock.Unlock()
for conn := range h.conns {
if err := conn.Close(); err != nil {
log.Errorf("Error while closing Hijacked conn: %v", err)
@@ -216,7 +218,10 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration, provider p
log.Errorf("failed to create HTTP transport: %v", err)
}

server.defaultForwardingRoundTripper = transport
server.defaultForwardingRoundTripper, err = newSmartRoundTripper(transport)
if err != nil {
log.Errorf("Failed to create HTTP transport: %v", err)
}

server.tracingMiddleware = globalConfiguration.Tracing
if server.tracingMiddleware != nil && server.tracingMiddleware.Backend != "" {
@@ -495,6 +500,12 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL
config.Certificates = []tls.Certificate{}
}

// workaround for users who used GODEBUG to activate TLS1.3
config.MaxVersion = tls.VersionTLS12
if strings.Contains(os.Getenv("GODEBUG"), "tls13=1") {
config.MaxVersion = tls.VersionTLS13
}

// Set the minimum TLS version if set in the config TOML
if minConst, exists := traefiktls.MinVersion[s.entryPoints[entryPointName].Configuration.TLS.MinVersion]; exists {
config.PreferServerCipherSuites = true
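A hedged aside on the TLS 1.3 workaround above: on Go 1.12, TLS 1.3 was opt-in via GODEBUG=tls13=1, so the server caps MaxVersion at TLS 1.2 unless that flag is present. A minimal standalone sketch of the same check, outside any Traefik code and with illustrative names:

package main

import (
	"crypto/tls"
	"fmt"
	"os"
	"strings"
)

// maxTLSVersion caps negotiation at TLS 1.2 unless the process was started
// with GODEBUG=tls13=1, mirroring the Go 1.12 opt-in for TLS 1.3.
func maxTLSVersion() uint16 {
	if strings.Contains(os.Getenv("GODEBUG"), "tls13=1") {
		return tls.VersionTLS13
	}
	return tls.VersionTLS12
}

func main() {
	cfg := &tls.Config{MaxVersion: maxTLSVersion()}
	fmt.Printf("TLS MaxVersion: %#x\n", cfg.MaxVersion)
}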
@@ -503,7 +514,7 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefiktls.TL

// Set the list of CipherSuites if set in the config TOML
if s.entryPoints[entryPointName].Configuration.TLS.CipherSuites != nil {
// if our list of CipherSuites is defined in the entrypoint config, we can re-initilize the suites list as empty
// if our list of CipherSuites is defined in the entrypoint config, we can re-initialize the suites list as empty
config.CipherSuites = make([]uint16, 0)
for _, cipher := range s.entryPoints[entryPointName].Configuration.TLS.CipherSuites {
if cipherConst, exists := traefiktls.CipherSuites[cipher]; exists {

@@ -237,11 +237,17 @@ func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration
}
}

var tlsConfig *tls.Config
if smartRt, ok := roundTripper.(*smartRoundTripper); ok {
tlsConfig = smartRt.GetTLSClientConfig()
}

var fwd http.Handler
fwd, err = forward.New(
forward.Stream(true),
forward.PassHostHeader(frontend.PassHostHeader),
forward.RoundTripper(roundTripper),
forward.WebsocketTLSClientConfig(tlsConfig),
forward.Rewriter(rewriter),
forward.ResponseModifier(responseModifier),
forward.BufferPool(s.bufferPool),
@@ -303,7 +309,6 @@ func buildServerRoute(serverEntryPoint *serverEntryPoint, frontendName string, f
func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
providersThrottleDuration := time.Duration(s.globalConfiguration.ProvidersThrottleDuration)
s.defaultConfigurationValues(configMsg.Configuration)
currentConfigurations := s.currentConfigurations.Get().(types.Configurations)

if log.GetLevel() == logrus.DebugLevel {
jsonConf, _ := json.Marshal(configMsg.Configuration)
@@ -315,11 +320,6 @@ func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
return
}

if reflect.DeepEqual(currentConfigurations[configMsg.ProviderName], configMsg.Configuration) {
log.Infof("Skipping same configuration for provider %s", configMsg.ProviderName)
return
}

providerConfigUpdateCh, ok := s.providerConfigUpdateMap[configMsg.ProviderName]
if !ok {
providerConfigUpdateCh = make(chan types.ConfigMessage)
@@ -455,11 +455,17 @@ func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish ch
}
})

var previousConfig types.ConfigMessage
for {
select {
case <-stop:
return
case nextConfig := <-in:
if reflect.DeepEqual(previousConfig, nextConfig) {
log.Infof("Skipping same configuration for provider %s", nextConfig.ProviderName)
continue
}
previousConfig = nextConfig
ring.In() <- nextConfig
}
}
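The two hunks above skip provider configurations that are identical to the one already applied, using reflect.DeepEqual both before queuing and inside the throttling loop. A minimal, self-contained sketch of that dedup idea; the types and names here are illustrative, not Traefik's:

package main

import (
	"fmt"
	"reflect"
)

type configMessage struct {
	ProviderName string
	Backends     map[string]string
}

// dedup forwards a message only when it differs from the last one seen,
// the same check the throttling loop applies with reflect.DeepEqual.
func dedup(in <-chan configMessage, out chan<- configMessage) {
	var previous configMessage
	for msg := range in {
		if reflect.DeepEqual(previous, msg) {
			fmt.Printf("skipping same configuration for provider %s\n", msg.ProviderName)
			continue
		}
		previous = msg
		out <- msg
	}
	close(out)
}

func main() {
	in := make(chan configMessage, 3)
	out := make(chan configMessage, 3)
	in <- configMessage{ProviderName: "docker", Backends: map[string]string{"b1": "http://10.0.0.1"}}
	in <- configMessage{ProviderName: "docker", Backends: map[string]string{"b1": "http://10.0.0.1"}} // identical, skipped
	in <- configMessage{ProviderName: "docker", Backends: map[string]string{"b1": "http://10.0.0.2"}}
	close(in)
	dedup(in, out)
	for msg := range out {
		fmt.Println("applied", msg.ProviderName, msg.Backends)
	}
}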
@@ -304,9 +304,11 @@ func TestThrottleProviderConfigReload(t *testing.T) {
}
}()

// publish 5 new configs, one new config each 10 milliseconds
// publish 5 new and different configs, one new config each 10 milliseconds
for i := 0; i < 5; i++ {
providerConfig <- types.ConfigMessage{}
providerConfig <- types.ConfigMessage{
ProviderName: fmt.Sprintf("test-%d", i),
}
time.Sleep(10 * time.Millisecond)
}

@@ -123,13 +123,6 @@ func (s *Server) buildLoadBalancer(frontendName string, backendName string, back
rr, _ = roundrobin.New(fwd)
}

var stickySession *roundrobin.StickySession
var cookieName string
if stickiness := backend.LoadBalancer.Stickiness; stickiness != nil {
cookieName = cookie.GetName(stickiness.CookieName, backendName)
stickySession = roundrobin.NewStickySession(cookieName)
}

lbMethod, err := types.NewLoadBalancerMethod(backend.LoadBalancer)
if err != nil {
return nil, fmt.Errorf("error loading load balancer method '%+v' for frontend %s: %v", backend.LoadBalancer, frontendName, err)
@@ -141,8 +134,10 @@ func (s *Server) buildLoadBalancer(frontendName string, backendName string, back
case types.Drr:
log.Debug("Creating load-balancer drr")

if stickySession != nil {
log.Debugf("Sticky session with cookie %v", cookieName)
if backend.LoadBalancer.Stickiness != nil {
cookieName := cookie.GetName(backend.LoadBalancer.Stickiness.CookieName, backendName)

stickySession := newStickySession(cookieName, backend.LoadBalancer.Stickiness)

lb, err = roundrobin.NewRebalancer(rr, roundrobin.RebalancerStickySession(stickySession))
if err != nil {
@@ -157,16 +152,19 @@ func (s *Server) buildLoadBalancer(frontendName string, backendName string, back
case types.Wrr:
log.Debug("Creating load-balancer wrr")

if stickySession != nil {
log.Debugf("Sticky session with cookie %v", cookieName)
if backend.LoadBalancer.Stickiness != nil {
cookieName := cookie.GetName(backend.LoadBalancer.Stickiness.CookieName, backendName)

stickySession := newStickySession(cookieName, backend.LoadBalancer.Stickiness)

option := roundrobin.EnableStickySession(stickySession)
if s.accessLoggerMiddleware != nil {
lb, err = roundrobin.New(saveFrontend, roundrobin.EnableStickySession(stickySession))
lb, err = roundrobin.New(saveFrontend, option)
if err != nil {
return nil, err
}
} else {
lb, err = roundrobin.New(fwd, roundrobin.EnableStickySession(stickySession))
lb, err = roundrobin.New(fwd, option)
if err != nil {
return nil, err
}
@@ -185,6 +183,30 @@ func (s *Server) buildLoadBalancer(frontendName string, backendName string, back
return lb, nil
}

func newStickySession(cookieName string, stickiness *types.Stickiness) *roundrobin.StickySession {
log.Debugf("Sticky session with cookie %v", cookieName)

opts := roundrobin.CookieOptions{
HTTPOnly: stickiness.HTTPOnly,
Secure: stickiness.Secure,
SameSite: convertSameSite(stickiness.SameSite),
}

return roundrobin.NewStickySessionWithOptions(cookieName, opts)
}

func convertSameSite(sameSite string) http.SameSite {
switch sameSite {
case "none":
return http.SameSiteNoneMode
case "lax":
return http.SameSiteLaxMode
case "strict":
return http.SameSiteStrictMode
default:
return 0
}
}
func (s *Server) configureLBServers(lb healthcheck.BalancerHandler, backend *types.Backend, backendName string) error {
for name, srv := range backend.Servers {
u, err := url.Parse(srv.URL)
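For context on the sticky-session change above: the affinity cookie now carries Secure, HTTPOnly and SameSite attributes, and convertSameSite maps the configured string onto net/http's SameSite constants. A hedged sketch of what the resulting Set-Cookie header looks like, using only the standard library; the cookie name and values are made up:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// sameSiteFromString mirrors the convertSameSite helper above; an empty or
// unknown value falls back to the zero value, i.e. no SameSite attribute.
func sameSiteFromString(v string) http.SameSite {
	switch v {
	case "none":
		return http.SameSiteNoneMode
	case "lax":
		return http.SameSiteLaxMode
	case "strict":
		return http.SameSiteStrictMode
	default:
		return 0
	}
}

func main() {
	rec := httptest.NewRecorder()
	http.SetCookie(rec, &http.Cookie{
		Name:     "_TRAEFIK_BACKEND", // illustrative cookie name
		Value:    "http://10.0.0.1:80",
		Secure:   true,
		HttpOnly: true,
		SameSite: sameSiteFromString("none"),
	})
	fmt.Println(rec.Header().Get("Set-Cookie"))
}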
@@ -206,23 +228,29 @@ func (s *Server) configureLBServers(lb healthcheck.BalancerHandler, backend *typ
// getRoundTripper will either use server.defaultForwardingRoundTripper or create a new one
// given a custom TLS configuration is passed and the passTLSCert option is set to true.
func (s *Server) getRoundTripper(entryPointName string, passTLSCert bool, tls *traefiktls.TLS) (http.RoundTripper, error) {
if passTLSCert {
tlsConfig, err := createClientTLSConfig(entryPointName, tls)
if err != nil {
return nil, fmt.Errorf("failed to create TLSClientConfig: %v", err)
}
tlsConfig.InsecureSkipVerify = s.globalConfiguration.InsecureSkipVerify

transport, err := createHTTPTransport(s.globalConfiguration)
if err != nil {
return nil, fmt.Errorf("failed to create HTTP transport: %v", err)
}

transport.TLSClientConfig = tlsConfig
return transport, nil
if !passTLSCert {
return s.defaultForwardingRoundTripper, nil
}

return s.defaultForwardingRoundTripper, nil
tlsConfig, err := createClientTLSConfig(entryPointName, tls)
if err != nil {
return nil, fmt.Errorf("failed to create TLSClientConfig: %v", err)
}
tlsConfig.InsecureSkipVerify = s.globalConfiguration.InsecureSkipVerify

transport, err := createHTTPTransport(s.globalConfiguration)
if err != nil {
return nil, fmt.Errorf("failed to create HTTP transport: %v", err)
}

transport.TLSClientConfig = tlsConfig

smartTransport, err := newSmartRoundTripper(transport)
if err != nil {
return nil, err
}

return smartTransport, nil
}

// createHTTPTransport creates an http.Transport configured with the GlobalConfiguration settings.
@@ -263,25 +291,21 @@ func createHTTPTransport(globalConfiguration configuration.GlobalConfiguration)
transport.ResponseHeaderTimeout = time.Duration(globalConfiguration.ForwardingTimeouts.ResponseHeaderTimeout)
}

if globalConfiguration.InsecureSkipVerify {
transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
}

if len(globalConfiguration.RootCAs) > 0 {
if globalConfiguration.InsecureSkipVerify || len(globalConfiguration.RootCAs) > 0 {
transport.TLSClientConfig = &tls.Config{
RootCAs: createRootCACertPool(globalConfiguration.RootCAs),
InsecureSkipVerify: globalConfiguration.InsecureSkipVerify,
RootCAs: createRootCACertPool(globalConfiguration.RootCAs),
}
}

err := http2.ConfigureTransport(transport)
if err != nil {
return nil, err
}

return transport, nil
}

func createRootCACertPool(rootCAs traefiktls.FilesOrContents) *x509.CertPool {
if len(rootCAs) == 0 {
return nil
}

roots := x509.NewCertPool()

for _, cert := range rootCAs {

server/smart_roundtripper.go (Normal file, 43 lines)
@@ -0,0 +1,43 @@
package server

import (
"crypto/tls"
"net/http"

"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2"
)

func newSmartRoundTripper(transport *http.Transport) (http.RoundTripper, error) {
transportHTTP1 := transport.Clone()

err := http2.ConfigureTransport(transport)
if err != nil {
return nil, err
}

return &smartRoundTripper{
http2: transport,
http: transportHTTP1,
}, nil
}

// smartRoundTripper implements RoundTrip while making sure that HTTP/2 is not used
// with protocols that start with a Connection Upgrade, such as SPDY or Websocket.
type smartRoundTripper struct {
http2 *http.Transport
http *http.Transport
}

func (m *smartRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
// If we have a connection upgrade, we don't use HTTP2
if httpguts.HeaderValuesContainsToken(req.Header["Connection"], "Upgrade") {
return m.http.RoundTrip(req)
}

return m.http2.RoundTrip(req)
}

func (m *smartRoundTripper) GetTLSClientConfig() *tls.Config {
return m.http2.TLSClientConfig
}
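A hedged illustration of why the new smartRoundTripper splits the transport in two: http2.ConfigureTransport is only applied to one clone, so requests that carry Connection: Upgrade (WebSocket, SPDY) stay on the plain HTTP/1.1 transport. The sketch below reuses the same httpguts helper on a standalone request; it is not Traefik code, just the routing decision in isolation:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http/httpguts"
)

// wantsUpgrade reports whether a request asks for a protocol upgrade,
// which is the condition smartRoundTripper uses to avoid HTTP/2.
func wantsUpgrade(req *http.Request) bool {
	return httpguts.HeaderValuesContainsToken(req.Header["Connection"], "Upgrade")
}

func main() {
	ws, _ := http.NewRequest(http.MethodGet, "http://backend.local/ws", nil)
	ws.Header.Set("Connection", "Upgrade")
	ws.Header.Set("Upgrade", "websocket")

	plain, _ := http.NewRequest(http.MethodGet, "http://backend.local/api", nil)

	fmt.Println("websocket request uses the HTTP/1.1 transport:", wantsUpgrade(ws))
	fmt.Println("plain request may use the HTTP/2 transport:", !wantsUpgrade(plain))
}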
@@ -24,6 +24,9 @@
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -23,6 +23,9 @@
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -22,6 +22,9 @@
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $serviceName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -19,6 +19,9 @@
{{if $backend.LoadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $backend.LoadBalancer.Stickiness.CookieName }}"
secure = {{ $backend.LoadBalancer.Stickiness.Secure }}
httpOnly = {{ $backend.LoadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $backend.LoadBalancer.Stickiness.SameSite }}"
{{end}}

{{if $backend.MaxConn }}
@@ -59,7 +62,9 @@

{{if $frontend.Auth }}
[frontends."{{ $frontendName }}".auth]
headerField = "X-WebAuth-User"
{{if $frontend.Auth.HeaderField }}
headerField = "{{ $frontend.Auth.HeaderField }}"
{{end}}

{{if $frontend.Auth.Basic }}
[frontends."{{ $frontendName }}".auth.basic]

@@ -22,6 +22,9 @@
{{if $loadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -25,6 +25,9 @@
{{if $loadBalancer.Stickiness }}
[backends."{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -25,6 +25,9 @@
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -24,6 +24,9 @@
{{if $loadBalancer.Stickiness }}
[backends."backend-{{ $backendName }}".loadBalancer.stickiness]
cookieName = "{{ $loadBalancer.Stickiness.CookieName }}"
secure = {{ $loadBalancer.Stickiness.Secure }}
httpOnly = {{ $loadBalancer.Stickiness.HTTPOnly }}
sameSite = "{{ $loadBalancer.Stickiness.SameSite }}"
{{end}}
{{end}}

@@ -16,36 +16,43 @@ import (
var (
// MinVersion Map of allowed TLS minimum versions
MinVersion = map[string]uint16{
`VersionTLS10`: tls.VersionTLS10,
`VersionTLS11`: tls.VersionTLS11,
`VersionTLS12`: tls.VersionTLS12,
"VersionTLS10": tls.VersionTLS10,
"VersionTLS11": tls.VersionTLS11,
"VersionTLS12": tls.VersionTLS12,
"VersionTLS13": tls.VersionTLS13,
}

// CipherSuites Map of TLS CipherSuites from crypto/tls
// Available CipherSuites defined at https://golang.org/pkg/crypto/tls/#pkg-constants
CipherSuites = map[string]uint16{
`TLS_RSA_WITH_RC4_128_SHA`: tls.TLS_RSA_WITH_RC4_128_SHA,
`TLS_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
`TLS_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_RSA_WITH_AES_128_CBC_SHA,
`TLS_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_RSA_WITH_AES_256_CBC_SHA,
`TLS_RSA_WITH_AES_128_CBC_SHA256`: tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
`TLS_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
`TLS_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
`TLS_ECDHE_ECDSA_WITH_RC4_128_SHA`: tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
`TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`: tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
`TLS_ECDHE_RSA_WITH_RC4_128_SHA`: tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
`TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
`TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
`TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
`TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
`TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
`TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
`TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
`TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`: tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
`TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`: tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
`TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`: tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
"TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
"TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
"TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
"TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
"TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
"TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
"TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
"TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
"TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256,
"TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384,
"TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256,
"TLS_FALLBACK_SCSV": tls.TLS_FALLBACK_SCSV,
}
)
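The maps above let the TOML configuration refer to TLS versions and cipher suites by name, now including the TLS 1.3 entries. A hedged sketch of how such string names can be resolved into a crypto/tls config; the maps below are a small local copy for illustration, not the traefiktls package:

package main

import (
	"crypto/tls"
	"fmt"
)

// Local name-to-constant maps, mirroring the idea of MinVersion and CipherSuites above.
var minVersion = map[string]uint16{
	"VersionTLS12": tls.VersionTLS12,
	"VersionTLS13": tls.VersionTLS13,
}

var cipherSuites = map[string]uint16{
	"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
	"TLS_AES_128_GCM_SHA256":                tls.TLS_AES_128_GCM_SHA256,
}

// buildTLSConfig resolves the configured names into a *tls.Config,
// rejecting names that are not present in the maps.
func buildTLSConfig(minName string, suiteNames []string) (*tls.Config, error) {
	min, ok := minVersion[minName]
	if !ok {
		return nil, fmt.Errorf("unknown minimum TLS version %q", minName)
	}
	cfg := &tls.Config{MinVersion: min}
	for _, name := range suiteNames {
		id, ok := cipherSuites[name]
		if !ok {
			return nil, fmt.Errorf("unknown cipher suite %q", name)
		}
		cfg.CipherSuites = append(cfg.CipherSuites, id)
	}
	return cfg, nil
}

func main() {
	cfg, err := buildTLSConfig("VersionTLS12", []string{"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("MinVersion=%#x CipherSuites=%v\n", cfg.MinVersion, cfg.CipherSuites)
}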
@@ -115,6 +122,7 @@ func (c *Certificates) CreateTLSConfig(entryPointName string) (*tls.Config, erro
}
}
}
config.BuildNameToCertificate()
return config, nil
}

@@ -2,6 +2,7 @@ package tls

import (
"crypto/tls"
"crypto/x509"
"net"
"sort"
"strings"
@@ -47,6 +48,11 @@ func (c CertificateStore) GetAllDomains() []string {
allCerts = append(allCerts, domains)
}
}

// Get Default certificate
if c.DefaultCertificate != nil {
allCerts = append(allCerts, getCertificateDomains(c.DefaultCertificate)...)
}
return allCerts
}

@@ -115,6 +121,27 @@ func (c CertificateStore) ResetCache() {
}
}

func getCertificateDomains(cert *tls.Certificate) []string {
if cert == nil {
return nil
}

x509Cert, err := x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil
}

var names []string
if len(x509Cert.Subject.CommonName) > 0 {
names = append(names, x509Cert.Subject.CommonName)
}
for _, san := range x509Cert.DNSNames {
names = append(names, san)
}

return names
}

// MatchDomain return true if a domain match the cert domain
func MatchDomain(domain string, certDomain string) bool {
if domain == certDomain {
@@ -13,6 +13,90 @@ import (
"github.com/stretchr/testify/require"
)

func TestGetAllDomains(t *testing.T) {
testCases := []struct {
desc string
staticCert string
dynamicCert string
defaultCert string
expectedDomains []string
}{
{
desc: "Empty Store, returns no domains",
staticCert: "",
dynamicCert: "",
defaultCert: "",
expectedDomains: nil,
},
{
desc: "Static cert domains",
staticCert: "snitest.com",
dynamicCert: "",
defaultCert: "",
expectedDomains: []string{"snitest.com"},
},
{
desc: "Dynamic cert domains",
staticCert: "",
dynamicCert: "snitest.com",
defaultCert: "",
expectedDomains: []string{"snitest.com"},
},
{
desc: "Default cert domains",
staticCert: "",
dynamicCert: "",
defaultCert: "snitest.com",
expectedDomains: []string{"snitest.com"},
},
{
desc: "All domains",
staticCert: "www.snitest.com",
dynamicCert: "*.snitest.com",
defaultCert: "snitest.com",
expectedDomains: []string{"www.snitest.com", "*.snitest.com", "snitest.com"},
},
}

for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()

staticMap := map[string]*tls.Certificate{}
if test.staticCert != "" {
cert, err := loadTestCert(test.staticCert, false)
require.NoError(t, err)
staticMap[strings.ToLower(test.staticCert)] = cert
}

dynamicMap := map[string]*tls.Certificate{}
if test.dynamicCert != "" {
cert, err := loadTestCert(test.dynamicCert, false)
require.NoError(t, err)
dynamicMap[strings.ToLower(test.dynamicCert)] = cert
}

var defaultCert *tls.Certificate
if test.defaultCert != "" {
cert, err := loadTestCert(test.defaultCert, false)
require.NoError(t, err)
defaultCert = cert
}

store := &CertificateStore{
DynamicCerts: safe.New(dynamicMap),
StaticCerts: safe.New(staticMap),
DefaultCertificate: defaultCert,
CertCache: cache.New(1*time.Hour, 10*time.Minute),
}

actual := store.GetAllDomains()
assert.Equal(t, test.expectedDomains, actual)
})
}
}

func TestGetBestCertificate(t *testing.T) {
testCases := []struct {
desc string
@@ -116,15 +200,15 @@ func TestGetBestCertificate(t *testing.T) {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
staticMap := map[string]*tls.Certificate{}
dynamicMap := map[string]*tls.Certificate{}

staticMap := map[string]*tls.Certificate{}
if test.staticCert != "" {
cert, err := loadTestCert(test.staticCert, test.uppercase)
require.NoError(t, err)
staticMap[strings.ToLower(test.staticCert)] = cert
}

dynamicMap := map[string]*tls.Certificate{}
if test.dynamicCert != "" {
cert, err := loadTestCert(test.dynamicCert, test.uppercase)
require.NoError(t, err)

tls/certificate_test.go (Normal file, 102 lines)
@@ -0,0 +1,102 @@
package tls

import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestCertificates_CreateTLSConfig(t *testing.T) {
var cert Certificates
certificate1, err := generateCertificate("test1.traefik.com")
require.NoError(t, err)
require.NotNil(t, certificate1)

cert = append(cert, *certificate1)

certificate2, err := generateCertificate("test2.traefik.com")
require.NoError(t, err)
require.NotNil(t, certificate2)

cert = append(cert, *certificate2)

config, err := cert.CreateTLSConfig("http")
require.NoError(t, err)

assert.Len(t, config.NameToCertificate, 2)
assert.Contains(t, config.NameToCertificate, "test1.traefik.com")
assert.Contains(t, config.NameToCertificate, "test2.traefik.com")
}

func generateCertificate(domain string) (*Certificate, error) {
certPEM, keyPEM, err := keyPair(domain, time.Time{})
if err != nil {
return nil, err
}

return &Certificate{
CertFile: FileOrContent(certPEM),
KeyFile: FileOrContent(keyPEM),
}, nil
}

// keyPair generates cert and key files
func keyPair(domain string, expiration time.Time) ([]byte, []byte, error) {
rsaPrivKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, nil, err
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rsaPrivKey)})

certPEM, err := PemCert(rsaPrivKey, domain, expiration)
if err != nil {
return nil, nil, err
}
return certPEM, keyPEM, nil
}

// PemCert generates PEM cert file
func PemCert(privKey *rsa.PrivateKey, domain string, expiration time.Time) ([]byte, error) {
derBytes, err := derCert(privKey, expiration, domain)
if err != nil {
return nil, err
}

return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil
}

func derCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, err
}

if expiration.IsZero() {
expiration = time.Now().Add(365 * (24 * time.Hour))
}

template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: domain,
},
NotBefore: time.Now(),
NotAfter: expiration,

KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement | x509.KeyUsageDataEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{domain},
}

return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey)
}
@@ -86,6 +86,7 @@ func derCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]by
NotAfter: expiration,

KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageKeyAgreement | x509.KeyUsageDataEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
DNSNames: []string{domain},
}
Some files were not shown because too many files have changed in this diff.