forked from Ivasoft/traefik
Compare commits
14 Commits
v1.0.0-bet ... v1.0.0-bet

| SHA1 |
|---|
| c2009b71b1 |
| ba8629e2ac |
| 6aba453afb |
| a15578a8f6 |
| 5c8d9f4eb9 |
| a9e615b3c7 |
| 94ad21020c |
| 4b76cb4318 |
| fad7ec6b7f |
| 82a49a8e89 |
| 2bcc5a2ac7 |
| 4f044cf2f9 |
| 9a407f79ff |
| affec30c64 |
Makefile (30 changes)
@@ -24,36 +24,27 @@ print-%: ; @echo $*=$($*)
default: binary

-all: generate-webui build
+all: generate-webui build ## validate all checks, build linux binary, run all tests\ncross non-linux binaries
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh

-binary: generate-webui build
+binary: generate-webui build ## build the linux binary
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate binary

-crossbinary: generate-webui build
+crossbinary: generate-webui build ## cross build the non-linux binaries
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate crossbinary

-test: build
+test: build ## run the unit and integration tests
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate test-unit binary test-integration

-test-unit: build
+test-unit: build ## run the unit tests
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate test-unit

-test-integration: build
+test-integration: build ## run the integration tests
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh generate test-integration

-validate: build
+validate: build ## validate gofmt, golint and go vet
    $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt validate-govet validate-golint

-validate-gofmt: build
-    $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-gofmt
-
-validate-govet: build
-    $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-govet
-
-validate-golint: build
-    $(DOCKER_RUN_TRAEFIK) ./script/make.sh validate-golint
-
build: dist
    docker build -t "$(TRAEFIK_DEV_IMAGE)" -f build.Dockerfile .

@@ -63,10 +54,10 @@ build-webui:
build-no-cache: dist
    docker build --no-cache -t "$(TRAEFIK_DEV_IMAGE)" -f build.Dockerfile .

-shell: build
+shell: build ## start a shell inside the build env
    $(DOCKER_RUN_TRAEFIK) /bin/bash

-image: build
+image: build ## build a docker traefik image
    docker build -t $(TRAEFIK_IMAGE) .

dist:

@@ -92,3 +83,6 @@ fmt:
deploy:
    ./script/deploy.sh
+
+help: ## this help
+    @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

build.Dockerfile

@@ -1,4 +1,4 @@
-FROM golang:1.6.0-alpine
+FROM golang:1.6.1-alpine

RUN apk update && apk add git bash gcc musl-dev \
    && go get github.com/Masterminds/glide \
cmd.go (1 change)
@@ -142,6 +142,7 @@ func init() {
    traefikCmd.PersistentFlags().BoolVar(&arguments.consulCatalog, "consulCatalog", false, "Enable Consul catalog backend")
    traefikCmd.PersistentFlags().StringVar(&arguments.ConsulCatalog.Domain, "consulCatalog.domain", "", "Default domain used")
    traefikCmd.PersistentFlags().StringVar(&arguments.ConsulCatalog.Endpoint, "consulCatalog.endpoint", "127.0.0.1:8500", "Consul server endpoint")
+   traefikCmd.PersistentFlags().StringVar(&arguments.ConsulCatalog.Prefix, "consulCatalog.prefix", "traefik", "Consul catalog tag prefix")

    traefikCmd.PersistentFlags().BoolVar(&arguments.zookeeper, "zookeeper", false, "Enable Zookeeper backend")
    traefikCmd.PersistentFlags().BoolVar(&arguments.Zookeeper.Watch, "zookeeper.watch", true, "Watch provider")
@@ -70,7 +70,7 @@ Frontends can be defined using the following rules:
- `PathPrefixStrip`: Same as `PathPrefix` but strip the given prefix from the request URL's Path.

You can optionally enable `passHostHeader` to forward client `Host` header to the backend.

Here is an example of frontends definition:

```toml

@@ -107,7 +107,7 @@ A circuit breaker can also be applied to a backend, preventing high loads on fai
Initial state is Standby. CB observes the statistics and does not modify the request.
In case if condition matches, CB enters Tripped state, where it responds with predefines code or redirects to another frontend.
Once Tripped timer expires, CB enters Recovering state and resets all stats.
-In case if the condition does not match and recovery timer expries, CB enters Standby state.
+In case if the condition does not match and recovery timer expires, CB enters Standby state.

It can be configured using:

@@ -120,6 +120,26 @@ For example:
- `LatencyAtQuantileMS(50.0) > 50`: watch latency at quantile in milliseconds.
- `ResponseCodeRatio(500, 600, 0, 600) > 0.5`: ratio of response codes in range [500-600) to [0-600)
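
These expressions are evaluated by oxy's `cbreaker` middleware, which `server.go` already imports further down in this compare. A minimal, hypothetical sketch of wrapping a backend handler with one of the expressions above (the handler, port and fallback behaviour here are illustrative only, not part of this change):

```go
package main

import (
    "net/http"

    "github.com/containous/oxy/cbreaker"
)

func main() {
    backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello from the backend"))
    })

    // Trip when more than half of the forwarded requests end in a network error;
    // while tripped, oxy serves its fallback response instead of forwarding.
    cb, err := cbreaker.New(backend, "NetworkErrorRatio() > 0.5")
    if err != nil {
        panic(err)
    }

    http.ListenAndServe(":8000", cb)
}
```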
+To proactively prevent backends from being overwhelmed with high load, a maximum connection limit can
+also be applied to each backend.
+
+Maximum connections can be configured by specifying an integer value for `maxconn.amount` and
+`maxconn.extractorfunc` which is a strategy used to determine how to categorize requests in order to
+evaluate the maximum connections.
+
+For example:
+```toml
+[backends]
+  [backends.backend1]
+    [backends.backend1.maxconn]
+       amount = 10
+       extractorfunc = "request.host"
+```
+
+- `backend1` will return `HTTP code 429 Too Many Requests` if there are already 10 requests in progress for the same Host header.
+- Another possible value for `extractorfunc` is `client.ip` which will categorize requests based on client source ip.
+- Lastly `extractorfunc` can take the value of `request.header.ANY_HEADER` which will categorize requests based on `ANY_HEADER` that you provide.
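
Under the hood this limit is enforced by oxy's `connlimit` middleware, which the `server.go` change further down wires up through `utils.NewExtractor` and `connlimit.New`. A minimal sketch assuming only that API (the handler and port are illustrative):

```go
package main

import (
    "net/http"

    "github.com/containous/oxy/connlimit"
    "github.com/containous/oxy/utils"
)

func main() {
    backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("hello"))
    })

    // extractorfunc = "request.host": group in-flight requests by their Host header.
    extractor, err := utils.NewExtractor("request.host")
    if err != nil {
        panic(err)
    }

    // amount = 10: an 11th concurrent request for the same Host is rejected with 429.
    limiter, err := connlimit.New(backend, extractor, 10)
    if err != nil {
        panic(err)
    }

    http.ListenAndServe(":8000", limiter)
}
```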

## Servers

Servers are simply defined using a `URL`. You can also apply a custom `weight` to each server (this will be used by load-balacning).
@@ -1,70 +1,212 @@

Old version (removed):

# Benchmarks

Here are some early Benchmarks between Nginx, HA-Proxy and Træfɪk acting as simple load balancers between two servers.

- Nginx:

```sh
$ docker run -d -e VIRTUAL_HOST=test.nginx.localhost emilevauge/whoami
$ docker run -d -e VIRTUAL_HOST=test.nginx.localhost emilevauge/whoami
$ docker run --log-driver=none -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
$ wrk -t12 -c400 -d60s -H "Host: test.nginx.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
  12 threads and 400 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency   162.61ms  203.34ms   1.72s    91.07%
    Req/Sec   277.57    107.67   790.00     67.53%
  Latency Distribution
     50%  128.19ms
     75%  218.22ms
     90%  342.12ms
     99%    1.08s
  197991 requests in 1.00m, 82.32MB read
  Socket errors: connect 0, read 0, write 0, timeout 18
Requests/sec:   3296.04
Transfer/sec:      1.37MB
```

- HA-Proxy:

```sh
$ docker run -d --name web1 -e VIRTUAL_HOST=test.haproxy.localhost emilevauge/whoami
$ docker run -d --name web2 -e VIRTUAL_HOST=test.haproxy.localhost emilevauge/whoami
$ docker run -d -p 80:80 --link web1:web1 --link web2:web2 dockercloud/haproxy
$ wrk -t12 -c400 -d60s -H "Host: test.haproxy.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
  12 threads and 400 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency   158.08ms  187.88ms   1.75s    89.61%
    Req/Sec   281.33    120.47     0.98k    65.88%
  Latency Distribution
     50%  121.77ms
     75%  227.10ms
     90%  351.98ms
     99%    1.01s
  200462 requests in 1.00m, 59.65MB read
Requests/sec:   3337.66
Transfer/sec:      0.99MB
```

- Træfɪk:

```sh
$ docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test.traefik.localhost emilevauge/whoami
$ docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test.traefik.localhost emilevauge/whoami
$ docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/traefik.toml -v /var/run/docker.sock:/var/run/docker.sock traefik
$ wrk -t12 -c400 -d60s -H "Host: test.traefik.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
  12 threads and 400 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency   132.93ms  121.89ms   1.20s    66.62%
    Req/Sec   280.95    104.88   740.00     68.26%
  Latency Distribution
     50%  128.71ms
     75%  214.15ms
     90%  281.45ms
     99%  498.44ms
  200734 requests in 1.00m, 80.02MB read
Requests/sec:   3340.13
Transfer/sec:      1.33MB
```

New version (added):

# Benchmarks

## Configuration

I would like to thanks [vincentbernat](https://github.com/vincentbernat) from [exoscale.ch](https://www.exoscale.ch) who kindly provided the infrastructure needed for the benchmarks.

I used 4 VMs for the tests with the following configuration:

- 32 GB RAM
- 8 CPU Cores
- 10 GB SSD
- Ubuntu 14.04 LTS 64-bit

## Setup

1. One VM used to launch the benchmarking tool [wrk](https://github.com/wg/wrk)
2. One VM for traefik (v1.0.0-beta.416) / nginx (v1.4.6)
3. Two VMs for 2 backend servers in go [whoami](https://github.com/emilevauge/whoamI/)

Each VM has been tuned using the following limits:

```bash
sysctl -w fs.file-max="9999999"
sysctl -w fs.nr_open="9999999"
sysctl -w net.core.netdev_max_backlog="4096"
sysctl -w net.core.rmem_max="16777216"
sysctl -w net.core.somaxconn="65535"
sysctl -w net.core.wmem_max="16777216"
sysctl -w net.ipv4.ip_local_port_range="1025 65535"
sysctl -w net.ipv4.tcp_fin_timeout="30"
sysctl -w net.ipv4.tcp_keepalive_time="30"
sysctl -w net.ipv4.tcp_max_syn_backlog="20480"
sysctl -w net.ipv4.tcp_max_tw_buckets="400000"
sysctl -w net.ipv4.tcp_no_metrics_save="1"
sysctl -w net.ipv4.tcp_syn_retries="2"
sysctl -w net.ipv4.tcp_synack_retries="2"
sysctl -w net.ipv4.tcp_tw_recycle="1"
sysctl -w net.ipv4.tcp_tw_reuse="1"
sysctl -w vm.min_free_kbytes="65536"
sysctl -w vm.overcommit_memory="1"
ulimit -n 9999999
```

### Nginx

Here is the config Nginx file use `/etc/nginx/nginx.conf`:

```
user www-data;
worker_processes auto;
worker_rlimit_nofile 200000;
pid /var/run/nginx.pid;

events {
    worker_connections 10000;
    use epoll;
    multi_accept on;
}

http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 300;
    keepalive_requests 10000;
    types_hash_max_size 2048;

    open_file_cache max=200000 inactive=300s;
    open_file_cache_valid 300s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    server_tokens off;
    dav_methods off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    access_log /var/log/nginx/access.log combined;
    error_log /var/log/nginx/error.log warn;

    gzip off;
    gzip_vary off;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}
```

Here is the Nginx vhost file used:

```
upstream whoami {
    server IP-whoami1:80;
    server IP-whoami2:80;
    keepalive 300;
}

server {
    listen 8001;
    server_name test.traefik;
    access_log off;
    error_log /dev/null crit;
    if ($host != "test.traefik") {
        return 404;
    }
    location / {
        proxy_pass http://whoami;
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        proxy_set_header X-Forwarded-Host $host;
    }
}
```

### Traefik

Here is the `traefik.toml` file used:

```
MaxIdleConnsPerHost = 100000
defaultEntryPoints = ["http"]

[entryPoints]
  [entryPoints.http]
  address = ":8000"

[file]

[backends]
  [backends.backend1]
    [backends.backend1.servers.server1]
    url = "http://IP-whoami1:80"
    weight = 1
    [backends.backend1.servers.server2]
    url = "http://IP-whoami2:80"
    weight = 1

[frontends]
  [frontends.frontend1]
  backend = "backend1"
    [frontends.frontend1.routes.test_1]
    rule = "Host: test.traefik"
```

## Results

### whoami:

```
wrk -t8 -c1000 -d60s -H "Host: test.traefik" --latency http://IP-whoami:80/bench
Running 1m test @ http://IP-whoami:80/bench
  20 threads and 1000 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    70.28ms  134.72ms   1.91s    89.94%
    Req/Sec     2.92k   742.42     8.78k    68.80%
  Latency Distribution
     50%   10.63ms
     75%   75.64ms
     90%  205.65ms
     99%  668.28ms
  3476705 requests in 1.00m, 384.61MB read
  Socket errors: connect 0, read 0, write 0, timeout 103
Requests/sec:  57894.35
Transfer/sec:      6.40MB
```

### nginx:

```
wrk -t20 -c1000 -d60s -H "Host: test.traefik" --latency http://IP-nginx:8001/bench
Running 1m test @ http://IP-nginx:8001/bench
  20 threads and 1000 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency   101.25ms  180.09ms   1.99s    89.34%
    Req/Sec     1.69k   567.69     9.39k    72.62%
  Latency Distribution
     50%   15.46ms
     75%  129.11ms
     90%  302.44ms
     99%  846.59ms
  2018427 requests in 1.00m, 298.36MB read
  Socket errors: connect 0, read 0, write 0, timeout 90
Requests/sec:  33591.67
Transfer/sec:      4.97MB
```

### traefik:

```
wrk -t8 -c1000 -d60s -H "Host: test.traefik" --latency http://IP-traefik:8000/bench
Running 1m test @ http://IP-traefik:8000/bench
  20 threads and 1000 connections
  Thread Stats   Avg      Stdev     Max   +/- Stdev
    Latency    91.72ms  150.43ms   2.00s    90.50%
    Req/Sec     1.43k   266.37     2.97k    69.77%
  Latency Distribution
     50%   19.74ms
     75%  121.98ms
     90%  237.39ms
     99%  687.49ms
  1705073 requests in 1.00m, 188.63MB read
  Socket errors: connect 0, read 0, write 0, timeout 7
Requests/sec:  28392.44
Transfer/sec:      3.14MB
```

## Conclusion

Traefik is obviously slower than Nginx, but not so much: Traefik can serve 28392 requests/sec and Nginx 33591 requests/sec which gives a ratio of 85%.
Not bad for young project :) !

Some areas of possible improvements:
- Use [GO_REUSEPORT](https://github.com/kavu/go_reuseport) listener
- Run a separate server instance per CPU core with `GOMAXPROCS=1` (it appears during benchmarks that there is a lot more context switches with traefik than with nginx)
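
A rough sketch of the first suggestion, assuming `github.com/kavu/go_reuseport` exposes a `Listen(proto, addr)` helper (the port and handler are illustrative); several such processes, each pinned to one core with `GOMAXPROCS=1`, could then bind the same port and let the kernel spread connections between them:

```go
package main

import (
    "net/http"
    "runtime"

    reuseport "github.com/kavu/go_reuseport"
)

func main() {
    // One core per process; start one process per CPU core.
    runtime.GOMAXPROCS(1)

    // SO_REUSEPORT listener: every process can bind :8000 concurrently.
    ln, err := reuseport.Listen("tcp", ":8000")
    if err != nil {
        panic(err)
    }

    http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        w.Write([]byte("ok"))
    }))
}
```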
docs/toml.md (27 changes)
@@ -138,7 +138,7 @@
# storageFile = "acme.json"

# Entrypoint to proxy acme challenge to.
# WARNING, must point to an entrypoint on port 443
#
# Required
#

@@ -218,6 +218,9 @@ defaultEntryPoints = ["http", "https"]
    url = "http://172.17.0.3:80"
    weight = 1
  [backends.backend2]
+    [backends.backend1.maxconn]
+      amount = 10
+      extractorfunc = "request.host"
    [backends.backend2.LoadBalancer]
      method = "drr"
    [backends.backend2.servers.server1]

@@ -281,6 +284,9 @@ filename = "rules.toml"
    url = "http://172.17.0.3:80"
    weight = 1
  [backends.backend2]
+    [backends.backend1.maxconn]
+      amount = 10
+      extractorfunc = "request.host"
    [backends.backend2.LoadBalancer]
      method = "drr"
    [backends.backend2.servers.server1]

@@ -675,11 +681,26 @@ endpoint = "127.0.0.1:8500"
# Optional
#
domain = "consul.localhost"

+# Prefix for Consul catalog tags
+#
+# Optional
+#
+prefix = "traefik"
```

This backend will create routes matching on hostname based on the service name
used in consul.

+Additional settings can be defined using Consul Catalog tags:
+- ```traefik.enable=false```: disable this container in Træfɪk
+- ```traefik.protocol=https```: override the default `http` protocol
+- ```traefik.backend.weight=10```: assign this weight to the container
+- ```traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5```
+- ```traefik.backend.loadbalancer=drr```: override the default load balancing mode
+- ```traefik.frontend.rule=Host:test.traefik.io```: override the default frontend rule (Default: `Host:{containerName}.{domain}`). See [frontends](#frontends).
+- ```traefik.frontend.passHostHeader=true```: forward client `Host` header to the backend.
+- ```traefik.frontend.entryPoints=http,https```: assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints`.
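
As an illustration of these tags, a service can be registered with them through the Consul catalog API, the same way the integration test below does with `api.CatalogRegistration`; the node name, addresses and tag values here are examples only:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
    client, err := api.NewClient(api.DefaultConfig())
    if err != nil {
        panic(err)
    }

    // Register "test" with a weight override and a custom frontend rule (example values).
    reg := &api.CatalogRegistration{
        Node:    "node1",
        Address: "172.17.0.3",
        Service: &api.AgentService{
            Service: "test",
            Address: "172.17.0.3",
            Port:    80,
            Tags: []string{
                "traefik.backend.weight=10",
                "traefik.frontend.rule=Host:test.traefik.io",
            },
        },
    }
    if _, err := client.Catalog().Register(reg, nil); err != nil {
        panic(err)
    }
}
```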

## Etcd backend

@@ -836,6 +857,8 @@ The Keys-Values structure should look (using `prefix = "/traefik"`):

| Key | Value |
|-----------------------------------------------------|------------------------|
| `/traefik/backends/backend2/maxconn/amount` | `10` |
| `/traefik/backends/backend2/maxconn/extractorfunc` | `request.host` |
| `/traefik/backends/backend2/loadbalancer/method` | `drr` |
| `/traefik/backends/backend2/servers/server1/url` | `http://172.17.0.4:80` |
| `/traefik/backends/backend2/servers/server1/weight` | `1` |
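
For instance, the two new `maxconn` keys can be written with docker/libkv, which traefik's KV providers build on; this is only a sketch under that assumption, and the etcd endpoint is illustrative:

```go
package main

import (
    "github.com/docker/libkv"
    "github.com/docker/libkv/store"
    "github.com/docker/libkv/store/etcd"
)

func main() {
    // Register the etcd backend before opening the store.
    etcd.Register()

    kv, err := libkv.NewStore(store.ETCD, []string{"127.0.0.1:2379"}, &store.Config{})
    if err != nil {
        panic(err)
    }

    // The two keys added to the table above.
    pairs := map[string]string{
        "/traefik/backends/backend2/maxconn/amount":        "10",
        "/traefik/backends/backend2/maxconn/extractorfunc": "request.host",
    }
    for k, v := range pairs {
        if err := kv.Put(k, []byte(v), nil); err != nil {
            panic(err)
        }
    }
}
```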

@@ -991,4 +1014,4 @@ entryPoint = "https"
  entrypoints = ["http", "https"] # overrides defaultEntryPoints
  backend = "backend2"
    rule = "Path:/test"
```
@@ -39,7 +39,7 @@ func (s *ConsulCatalogSuite) SetUpSuite(c *check.C) {
    time.Sleep(2000 * time.Millisecond)
}

-func (s *ConsulCatalogSuite) registerService(name string, address string, port int) error {
+func (s *ConsulCatalogSuite) registerService(name string, address string, port int, tags []string) error {
    catalog := s.consulClient.Catalog()
    _, err := catalog.Register(
        &api.CatalogRegistration{

@@ -50,6 +50,7 @@ func (s *ConsulCatalogSuite) registerService(name string, address string, port i
                Service: name,
                Address: address,
                Port:    port,
+               Tags:    tags,
            },
        },
        &api.WriteOptions{},

@@ -93,7 +94,7 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {

    nginx := s.composeProject.Container(c, "nginx")

-   err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80)
+   err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
    c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
    defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
@@ -16,6 +16,8 @@ import (
const (
    // DefaultWatchWaitTime is the duration to wait when polling consul
    DefaultWatchWaitTime = 15 * time.Second
+   // DefaultConsulCatalogTagPrefix is a prefix for additional service/node configurations
+   DefaultConsulCatalogTagPrefix = "traefik"
)

// ConsulCatalog holds configurations of the Consul catalog provider.

@@ -24,10 +26,16 @@ type ConsulCatalog struct {
    Endpoint string
    Domain   string
    client   *api.Client
+   Prefix   string
}

+type serviceUpdate struct {
+   ServiceName string
+   Attributes  []string
+}
+
type catalogUpdate struct {
-   Service string
+   Service *serviceUpdate
    Nodes   []*api.ServiceEntry
}

@@ -79,41 +87,76 @@ func (provider *ConsulCatalog) healthyNodes(service string) (catalogUpdate, erro
        return catalogUpdate{}, err
    }

+   set := map[string]bool{}
+   tags := []string{}
+   for _, node := range data {
+       for _, tag := range node.Service.Tags {
+           if _, ok := set[tag]; ok == false {
+               set[tag] = true
+               tags = append(tags, tag)
+           }
+       }
+   }
+
    return catalogUpdate{
-       Service: service,
-       Nodes:   data,
+       Service: &serviceUpdate{
+           ServiceName: service,
+           Attributes:  tags,
+       },
+       Nodes: data,
    }, nil
}

+func (provider *ConsulCatalog) getEntryPoints(list string) []string {
+   return strings.Split(list, ",")
+}
+
func (provider *ConsulCatalog) getBackend(node *api.ServiceEntry) string {
    return strings.ToLower(node.Service.Service)
}

-func (provider *ConsulCatalog) getFrontendValue(service string) string {
-   return "Host:" + service + "." + provider.Domain
+func (provider *ConsulCatalog) getFrontendRule(service serviceUpdate) string {
+   customFrontendRule := provider.getAttribute("frontend.rule", service.Attributes, "")
+   if customFrontendRule != "" {
+       return customFrontendRule
+   }
+   return "Host:" + service.ServiceName + "." + provider.Domain
}

+func (provider *ConsulCatalog) getAttribute(name string, tags []string, defaultValue string) string {
+   for _, tag := range tags {
+       if strings.Index(tag, DefaultConsulCatalogTagPrefix+".") == 0 {
+           if kv := strings.SplitN(tag[len(DefaultConsulCatalogTagPrefix+"."):], "=", 2); len(kv) == 2 && kv[0] == name {
+               return kv[1]
+           }
+       }
+   }
+   return defaultValue
+}
+
func (provider *ConsulCatalog) buildConfig(catalog []catalogUpdate) *types.Configuration {
    var FuncMap = template.FuncMap{
-       "getBackend":       provider.getBackend,
-       "getFrontendValue": provider.getFrontendValue,
-       "replace":          replace,
+       "getBackend":      provider.getBackend,
+       "getFrontendRule": provider.getFrontendRule,
+       "getAttribute":    provider.getAttribute,
+       "getEntryPoints":  provider.getEntryPoints,
+       "replace":         replace,
    }

    allNodes := []*api.ServiceEntry{}
-   serviceNames := []string{}
+   services := []*serviceUpdate{}
    for _, info := range catalog {
        if len(info.Nodes) > 0 {
-           serviceNames = append(serviceNames, info.Service)
+           services = append(services, info.Service)
            allNodes = append(allNodes, info.Nodes...)
        }
    }

    templateObjects := struct {
-       Services []string
+       Services []*serviceUpdate
        Nodes    []*api.ServiceEntry
    }{
-       Services: serviceNames,
+       Services: services,
        Nodes:    allNodes,
    }
@@ -14,17 +14,68 @@ func TestConsulCatalogGetFrontendRule(t *testing.T) {
    }

    services := []struct {
-       service  string
+       service  serviceUpdate
        expected string
    }{
        {
-           service: "foo",
+           service: serviceUpdate{
+               ServiceName: "foo",
+               Attributes:  []string{},
+           },
            expected: "Host:foo.localhost",
        },
+       {
+           service: serviceUpdate{
+               ServiceName: "foo",
+               Attributes: []string{
+                   "traefik.frontend.rule=Host:*.example.com",
+               },
+           },
+           expected: "Host:*.example.com",
+       },
    }

    for _, e := range services {
-       actual := provider.getFrontendValue(e.service)
+       actual := provider.getFrontendRule(e.service)
        if actual != e.expected {
            t.Fatalf("expected %q, got %q", e.expected, actual)
        }
    }
}

+func TestConsulCatalogGetAttribute(t *testing.T) {
+   provider := &ConsulCatalog{
+       Domain: "localhost",
+   }
+
+   services := []struct {
+       tags         []string
+       key          string
+       defaultValue string
+       expected     string
+   }{
+       {
+           tags: []string{
+               "foo.bar=ramdom",
+               "traefik.backend.weight=42",
+           },
+           key:          "backend.weight",
+           defaultValue: "",
+           expected:     "42",
+       },
+       {
+           tags: []string{
+               "foo.bar=ramdom",
+               "traefik.backend.wei=42",
+           },
+           key:          "backend.weight",
+           defaultValue: "",
+           expected:     "",
+       },
+   }
+
+   for _, e := range services {
+       actual := provider.getAttribute(e.key, e.tags, e.defaultValue)
+       if actual != e.expected {
+           t.Fatalf("expected %q, got %q", e.expected, actual)
+       }

@@ -49,7 +100,10 @@ func TestConsulCatalogBuildConfig(t *testing.T) {
        {
            nodes: []catalogUpdate{
                {
-                   Service: "test",
+                   Service: &serviceUpdate{
+                       ServiceName: "test",
+                       Attributes:  []string{},
+                   },
                },
            },
            expectedFrontends: map[string]*types.Frontend{},

@@ -58,12 +112,26 @@ func TestConsulCatalogBuildConfig(t *testing.T) {
        {
            nodes: []catalogUpdate{
                {
-                   Service: "test",
+                   Service: &serviceUpdate{
+                       ServiceName: "test",
+                       Attributes: []string{
+                           "traefik.backend.loadbalancer=drr",
+                           "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5",
+                           "random.foo=bar",
+                       },
+                   },
                    Nodes: []*api.ServiceEntry{
                        {
                            Service: &api.AgentService{
                                Service: "test",
                                Address: "127.0.0.1",
                                Port:    80,
+                               Tags: []string{
+                                   "traefik.backend.weight=42",
+                                   "random.foo=bar",
+                                   "traefik.backend.passHostHeader=true",
+                                   "traefik.protocol=https",
+                               },
                            },
                            Node: &api.Node{
                                Node: "localhost",

@@ -86,12 +154,17 @@ func TestConsulCatalogBuildConfig(t *testing.T) {
            expectedBackends: map[string]*types.Backend{
                "backend-test": {
                    Servers: map[string]types.Server{
-                       "server-localhost-80": {
-                           URL: "http://127.0.0.1:80",
+                       "test--127-0-0-1--80": {
+                           URL:    "https://127.0.0.1:80",
+                           Weight: 42,
                        },
                    },
-                   CircuitBreaker: nil,
-                   LoadBalancer:   nil,
+                   CircuitBreaker: &types.CircuitBreaker{
+                       Expression: "NetworkErrorRatio() > 0.5",
+                   },
+                   LoadBalancer: &types.LoadBalancer{
+                       Method: "drr",
+                   },
                },
            },
        },
@@ -168,3 +168,41 @@ func TestReplace(t *testing.T) {
        }
    }
}

+func TestGetConfigurationReturnsCorrectMaxConnConfiguration(t *testing.T) {
+   templateFile, err := ioutil.TempFile("", "provider-configuration")
+   if err != nil {
+       t.Fatal(err)
+   }
+   defer os.RemoveAll(templateFile.Name())
+   data := []byte(`[backends]
+   [backends.backend1]
+       [backends.backend1.maxconn]
+           amount = 10
+           extractorFunc = "request.host"`)
+   err = ioutil.WriteFile(templateFile.Name(), data, 0700)
+   if err != nil {
+       t.Fatal(err)
+   }
+
+   provider := &myProvider{
+       BaseProvider{
+           Filename: templateFile.Name(),
+       },
+   }
+   configuration, err := provider.getConfiguration(templateFile.Name(), nil, nil)
+   if err != nil {
+       t.Fatalf("Shouldn't have error out, got %v", err)
+   }
+   if configuration == nil {
+       t.Fatalf("Configuration should not be nil, but was")
+   }
+
+   if configuration.Backends["backend1"].MaxConn.Amount != 10 {
+       t.Fatalf("Configuration did not parse MaxConn.Amount properly")
+   }
+
+   if configuration.Backends["backend1"].MaxConn.ExtractorFunc != "request.host" {
+       t.Fatalf("Configuration did not parse MaxConn.ExtractorFunc properly")
+   }
+}
server.go (14 changes)
@@ -22,9 +22,11 @@ import (
    log "github.com/Sirupsen/logrus"
    "github.com/codegangsta/negroni"
    "github.com/containous/oxy/cbreaker"
+   "github.com/containous/oxy/connlimit"
    "github.com/containous/oxy/forward"
    "github.com/containous/oxy/roundrobin"
    "github.com/containous/oxy/stream"
+   "github.com/containous/oxy/utils"
    "github.com/containous/traefik/middlewares"
    "github.com/containous/traefik/provider"
    "github.com/containous/traefik/safe"

@@ -423,6 +425,18 @@ func (server *Server) loadConfig(configurations configs, globalConfiguration Glo
                }
            }
        }
+       maxConns := configuration.Backends[frontend.Backend].MaxConn
+       if maxConns != nil && maxConns.Amount != 0 {
+           extractFunc, err := utils.NewExtractor(maxConns.ExtractorFunc)
+           if err != nil {
+               return nil, err
+           }
+           log.Debugf("Creating loadd-balancer connlimit")
+           lb, err = connlimit.New(lb, extractFunc, maxConns.Amount, connlimit.Logger(oxyLogger))
+           if err != nil {
+               return nil, err
+           }
+       }
        // retry ?
        if globalConfiguration.Retry != nil {
            retries := len(configuration.Backends[frontend.Backend].Servers)
@@ -1,12 +1,40 @@
-[backends]{{range .Nodes}}
-  [backends.backend-{{getBackend .}}.servers.server-{{.Node.Node | replace "." "-"}}-{{.Service.Port}}]
-    url = "http://{{.Node.Address}}:{{.Service.Port}}"
+[backends]
+{{range .Nodes}}
+{{if ne (getAttribute "enable" .Service.Tags "true") "false"}}
+  [backends.backend-{{getBackend .}}.servers.{{.Service.Service | replace "." "-"}}--{{.Service.Address | replace "." "-"}}--{{.Service.Port}}]
+    url = "{{getAttribute "protocol" .Service.Tags "http"}}://{{.Service.Address}}:{{.Service.Port}}"
+    {{$weight := getAttribute "backend.weight" .Service.Tags ""}}
+    {{with $weight}}
+    weight = {{$weight}}
+    {{end}}
+{{end}}
+{{end}}
+
+{{range .Services}}
+   {{$service := .ServiceName}}
+   {{$circuitBreaker := getAttribute "backend.circuitbreaker" .Attributes ""}}
+   {{with $circuitBreaker}}
+   [backends.backend-{{$service}}.circuitbreaker]
+     expression = "{{$circuitBreaker}}"
+   {{end}}
+
+   {{$loadBalancer := getAttribute "backend.loadbalancer" .Attributes ""}}
+   {{with $loadBalancer}}
+   [backends.backend-{{$service}}.loadbalancer]
+     method = "{{$loadBalancer}}"
+   {{end}}
+{{end}}

[frontends]{{range .Services}}
-  [frontends.frontend-{{.}}]
-  backend = "backend-{{.}}"
-  passHostHeader = false
-  [frontends.frontend-{{.}}.routes.route-host-{{.}}]
-    rule = "{{getFrontendValue .}}"
+  [frontends.frontend-{{.ServiceName}}]
+  backend = "backend-{{.ServiceName}}"
+  passHostHeader = {{getAttribute "frontend.passHostHeader" .Attributes "false"}}
+  {{$entryPoints := getAttribute "frontend.entrypoints" .Attributes ""}}
+  {{with $entryPoints}}
+    entrypoints = [{{range getEntryPoints $entryPoints}}
+      "{{.}}",
+    {{end}}]
+  {{end}}
+  [frontends.frontend-{{.ServiceName}}.routes.route-host-{{.ServiceName}}]
+    rule = "{{getFrontendRule .}}"
{{end}}
@@ -17,6 +17,16 @@
      method = "{{$loadBalancer}}"
    {{end}}

+   {{$maxConnAmt := Get "" . "/maxconn/" "amount"}}
+   {{$maxConnExtractorFunc := Get "" . "/maxconn/" "extractorfunc"}}
+   {{with $maxConnAmt}}
+   {{with $maxConnExtractorFunc}}
+   [backends.{{Last $backend}}.maxConn]
+     amount = {{$maxConnAmt}}
+     extractorFunc = "{{$maxConnExtractorFunc}}"
+   {{end}}
+   {{end}}
+
    {{range $servers}}
    [backends.{{Last $backend}}.servers.{{Last .}}]
      url = "{{Get "" . "/url"}}"

@@ -27,7 +37,7 @@
[frontends]{{range $frontends}}
    {{$frontend := Last .}}
    {{$entryPoints := SplitGet . "/entrypoints"}}
-   [frontends.{{$frontend}}]
+   [frontends."{{$frontend}}"]
    backend = "{{Get "" . "/backend"}}"
    passHostHeader = {{Get "false" . "/passHostHeader"}}
    entryPoints = [{{range $entryPoints}}

@@ -35,7 +45,7 @@
    {{end}}]
    {{$routes := List . "/routes/"}}
    {{range $routes}}
-   [frontends.{{$frontend}}.routes.{{Last .}}]
+   [frontends."{{$frontend}}".routes."{{Last .}}"]
      rule = "{{Get "" . "/rule"}}"
    {{end}}
{{end}}
@@ -10,6 +10,13 @@ type Backend struct {
    Servers        map[string]Server `json:"servers,omitempty"`
    CircuitBreaker *CircuitBreaker   `json:"circuitBreaker,omitempty"`
    LoadBalancer   *LoadBalancer     `json:"loadBalancer,omitempty"`
+   MaxConn        *MaxConn          `json:"maxConn,omitempty"`
}

+// MaxConn holds maximum connection configuraiton
+type MaxConn struct {
+   Amount        int64  `json:"amount,omitempty"`
+   ExtractorFunc string `json:"extractorFunc,omitempty"`
+}
+
// LoadBalancer holds load balancing configuration.