forked from Ivasoft/traefik
Compare commits
16 Commits
v1.0.alpha ... v1.0.alpha
| Author | SHA1 | Date |
|---|---|---|
| | 9830086790 | |
| | 8393746e02 | |
| | 2314ad9bf9 | |
| | 3af21612b6 | |
| | 7674a82801 | |
| | d63d2a8a26 | |
| | a458018aa2 | |
| | 33cde6aacd | |
| | 4ded2682d2 | |
| | 4042938556 | |
| | 0e683cc535 | |
| | 4923da7f4d | |
| | 11781087ca | |
| | 3063251d43 | |
| | b42b170ad2 | |
| | defbb44b35 | |
.gitignore (vendored): 1 changed line
@@ -8,3 +8,4 @@ traefik.toml
*.test
vendor/
static/
glide.lock
@@ -16,13 +16,14 @@ It supports several backends ([Docker :whale:](https://www.docker.com/), [Mesos/

## Features

- [It's fast](docs/index.md#benchmarks)
- No dependency hell, single binary made with go
- Simple json Rest API
- Simple TOML file configuration
- Multiple backends supported: Docker, Mesos/Marathon, Consul, Etcd, and more to come
- Watchers for backends, can listen change in backends to apply a new configuration automatically
- Hot-reloading of configuration. No need to restart the process
- Graceful shutdown http connections during hot-reloads
- Graceful shutdown http connections
- Circuit breakers on backends
- Round Robin, rebalancer load-balancers
- Rest Metrics
cmd.go: 1 changed line
@@ -126,7 +126,6 @@ func init() {
traefikCmd.PersistentFlags().StringVar(&arguments.Marathon.Filename, "marathon.filename", "", "Override default configuration template. For advanced users :)")
traefikCmd.PersistentFlags().StringVar(&arguments.Marathon.Endpoint, "marathon.endpoint", "http://127.0.0.1:8080", "Marathon server endpoint. You can also specify multiple endpoint for Marathon")
traefikCmd.PersistentFlags().StringVar(&arguments.Marathon.Domain, "marathon.domain", "", "Default domain used")
traefikCmd.PersistentFlags().StringVar(&arguments.Marathon.NetworkInterface, "marathon.networkInterface", "eth0", "Network interface used to call Marathon web services. Needed in case of multiple network interfaces")

traefikCmd.PersistentFlags().BoolVar(&arguments.consul, "consul", false, "Enable Consul backend")
traefikCmd.PersistentFlags().BoolVar(&arguments.Consul.Watch, "consul.watch", true, "Watch provider")
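As an aside for readers unfamiliar with the wiring above: traefikCmd is a cobra command, and each PersistentFlags().StringVar/BoolVar call binds one CLI flag to a field of the arguments struct. Below is a minimal, self-contained sketch of that same pattern; the demo command and the settings struct are illustrative stand-ins, not traefik code.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// settings mirrors the idea of traefik's arguments struct: flag values land
// directly in struct fields via the StringVar/BoolVar bindings below.
type settings struct {
	endpoint string
	domain   string
	watch    bool
}

func main() {
	var cfg settings

	// demoCmd is a stand-in root command, analogous to traefikCmd.
	demoCmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("endpoint=%s domain=%s watch=%v\n", cfg.endpoint, cfg.domain, cfg.watch)
		},
	}

	// Persistent flags are inherited by subcommands, same pattern as cmd.go above.
	demoCmd.PersistentFlags().StringVar(&cfg.endpoint, "marathon.endpoint", "http://127.0.0.1:8080", "Marathon server endpoint")
	demoCmd.PersistentFlags().StringVar(&cfg.domain, "marathon.domain", "", "Default domain used")
	demoCmd.PersistentFlags().BoolVar(&cfg.watch, "marathon.watch", true, "Watch provider")

	if err := demoCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```

Running the sketch with, for example, `--marathon.endpoint http://marathon:8080` prints the bound values, which is all the flag plumbing in cmd.go does before the providers consume them.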
docs/index.md: 174 changed lines
@@ -151,7 +151,6 @@ Flags:
--marathon.domain string Default domain used
--marathon.endpoint string Marathon server endpoint. You can also specify multiple endpoint for Marathon (default "http://127.0.0.1:8080")
--marathon.filename string Override default configuration template. For advanced users :)
--marathon.networkInterface string Network interface used to call Marathon web services. Needed in case of multiple network interfaces (default "eth0")
--marathon.watch Watch provider (default true)
--maxIdleConnsPerHost int If non-zero, controls the maximum idle (keep-alive) to keep per-host. If zero, DefaultMaxIdleConnsPerHost is used
--providersThrottleDuration duration Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time. (default 2s)
@@ -640,12 +639,6 @@ Træfɪk can be configured to use Marathon as a backend configuration:
#
endpoint = "http://127.0.0.1:8080"

# Network interface used to call Marathon web services. Needed in case of multiple network interfaces.
# Optional
# Default: "eth0"
#
networkInterface = "eth0"

# Enable watch Marathon changes
#
# Optional
@@ -1069,128 +1062,71 @@ Note that Træfɪk *will not watch for key changes in the `/traefik_configuratio

## <a id="benchmarks"></a> Benchmarks

Here are some early Benchmarks between Nginx and Træfɪk acting as simple load balancers between two servers.
Here are some early Benchmarks between Nginx, HA-Proxy and Træfɪk acting as simple load balancers between two servers.

- Nginx:

```sh
$ docker run -d -e VIRTUAL_HOST=test1.localhost emilevauge/whoami
$ docker run -d -e VIRTUAL_HOST=test1.localhost emilevauge/whoami
$ docker run -d -e VIRTUAL_HOST=test.nginx.localhost emilevauge/whoami
$ docker run -d -e VIRTUAL_HOST=test.nginx.localhost emilevauge/whoami
$ docker run --log-driver=none -d -p 80:80 -v /var/run/docker.sock:/tmp/docker.sock:ro jwilder/nginx-proxy
$ ab -n 20000 -c 20 -r http://test1.localhost/
This is ApacheBench, Version 2.3 <$Revision: 1528965 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
$ wrk -t12 -c400 -d60s -H "Host: test.nginx.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 162.61ms 203.34ms 1.72s 91.07%
Req/Sec 277.57 107.67 790.00 67.53%
Latency Distribution
50% 128.19ms
75% 218.22ms
90% 342.12ms
99% 1.08s
197991 requests in 1.00m, 82.32MB read
Socket errors: connect 0, read 0, write 0, timeout 18
Requests/sec: 3296.04
Transfer/sec: 1.37MB
```

Benchmarking test1.localhost (be patient)
Completed 2000 requests
Completed 4000 requests
Completed 6000 requests
Completed 8000 requests
Completed 10000 requests
Completed 12000 requests
Completed 14000 requests
Completed 16000 requests
Completed 18000 requests
Completed 20000 requests
Finished 20000 requests
- HA-Proxy:

Server Software: nginx/1.9.2
Server Hostname: test1.localhost
Server Port: 80

Document Path: /
Document Length: 287 bytes

Concurrency Level: 20
Time taken for tests: 5.874 seconds
Complete requests: 20000
Failed requests: 0
Total transferred: 8900000 bytes
HTML transferred: 5740000 bytes
Requests per second: 3404.97 [#/sec] (mean)
Time per request: 5.874 [ms] (mean)
Time per request: 0.294 [ms] (mean, across all concurrent requests)
Transfer rate: 1479.70 [Kbytes/sec] received

Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.1 0 2
Processing: 0 6 2.4 6 35
Waiting: 0 5 2.3 5 33
Total: 0 6 2.4 6 36

Percentage of the requests served within a certain time (ms)
50% 6
66% 6
75% 7
80% 7
90% 9
95% 10
98% 12
99% 13
100% 36 (longest request)
```
$ docker run -d --name web1 -e VIRTUAL_HOST=test.haproxy.localhost emilevauge/whoami
$ docker run -d --name web2 -e VIRTUAL_HOST=test.haproxy.localhost emilevauge/whoami
$ docker run -d -p 80:80 --link web1:web1 --link web2:web2 dockercloud/haproxy
$ wrk -t12 -c400 -d60s -H "Host: test.haproxy.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 158.08ms 187.88ms 1.75s 89.61%
Req/Sec 281.33 120.47 0.98k 65.88%
Latency Distribution
50% 121.77ms
75% 227.10ms
90% 351.98ms
99% 1.01s
200462 requests in 1.00m, 59.65MB read
Requests/sec: 3337.66
Transfer/sec: 0.99MB
```
- Træfɪk:

```sh
docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test1.docker.localhost emilevauge/whoami
docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test1.docker.localhost emilevauge/whoami
docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/traefik.toml -v /var/run/docker.sock:/var/run/docker.sock containous/traefik
$ ab -n 20000 -c 20 -r http://test1.docker.localhost/
This is ApacheBench, Version 2.3 <$Revision: 1528965 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/

Benchmarking test1.docker.localhost (be patient)
Completed 2000 requests
Completed 4000 requests
Completed 6000 requests
Completed 8000 requests
Completed 10000 requests
Completed 12000 requests
Completed 14000 requests
Completed 16000 requests
Completed 18000 requests
Completed 20000 requests
Finished 20000 requests

Server Software: .
Server Hostname: test1.docker.localhost
Server Port: 80

Document Path: /
Document Length: 312 bytes

Concurrency Level: 20
Time taken for tests: 6.545 seconds
Complete requests: 20000
Failed requests: 0
Total transferred: 8600000 bytes
HTML transferred: 6240000 bytes
Requests per second: 3055.60 [#/sec] (mean)
Time per request: 6.545 [ms] (mean)
Time per request: 0.327 [ms] (mean, across all concurrent requests)
Transfer rate: 1283.11 [Kbytes/sec] received

Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 0 0.2 0 7
Processing: 1 6 2.2 6 22
Waiting: 1 6 2.1 6 21
Total: 1 7 2.2 6 22

Percentage of the requests served within a certain time (ms)
50% 6
66% 7
75% 8
80% 8
90% 9
95% 10
98% 11
99% 13
100% 22 (longest request)
$ docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test.traefik.localhost emilevauge/whoami
$ docker run -d -l traefik.backend=test1 -l traefik.frontend.rule=Host -l traefik.frontend.value=test.traefik.localhost emilevauge/whoami
$ docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/traefik.toml -v /var/run/docker.sock:/var/run/docker.sock containous/traefik
$ wrk -t12 -c400 -d60s -H "Host: test.traefik.localhost" --latency http://127.0.0.1:80
Running 1m test @ http://127.0.0.1:80
12 threads and 400 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 132.93ms 121.89ms 1.20s 66.62%
Req/Sec 280.95 104.88 740.00 68.26%
Latency Distribution
50% 128.71ms
75% 214.15ms
90% 281.45ms
99% 498.44ms
200734 requests in 1.00m, 80.02MB read
Requests/sec: 3340.13
Transfer/sec: 1.33MB
```
@@ -57,7 +57,7 @@ import:
- package: github.com/flynn/go-shlex
  ref: 3f9db97f856818214da2e1057f8ad84803971cff
- package: github.com/fsouza/go-dockerclient
  ref: 0239034d42f665efa17fd77c39f891c2f9f32922
  ref: a49c8269a6899cae30da1f8a4b82e0ce945f9967
- package: github.com/boltdb/bolt
  ref: 51f99c862475898df9773747d3accd05a7ca33c1
- package: gopkg.in/mgo.v2
middlewares/handlerSwitcher.go (new file): 40 lines
@@ -0,0 +1,40 @@
package middlewares

import (
	"github.com/gorilla/mux"
	"net/http"
	"sync"
)

// HandlerSwitcher allows hot switching of http.ServeMux
type HandlerSwitcher struct {
	handler     *mux.Router
	handlerLock *sync.Mutex
}

// NewHandlerSwitcher builds a new instance of HandlerSwitcher
func NewHandlerSwitcher(newHandler *mux.Router) (hs *HandlerSwitcher) {
	return &HandlerSwitcher{
		handler:     newHandler,
		handlerLock: &sync.Mutex{},
	}
}

func (hs *HandlerSwitcher) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	hs.handlerLock.Lock()
	handlerBackup := hs.handler
	hs.handlerLock.Unlock()
	handlerBackup.ServeHTTP(rw, r)
}

// GetHandler returns the current http.ServeMux
func (hs *HandlerSwitcher) GetHandler() (newHandler *mux.Router) {
	return hs.handler
}

// UpdateHandler safely updates the current http.ServeMux with a new one
func (hs *HandlerSwitcher) UpdateHandler(newHandler *mux.Router) {
	hs.handlerLock.Lock()
	hs.handler = newHandler
	defer hs.handlerLock.Unlock()
}
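To see how the new middleware is meant to be used, here is a rough usage sketch: a listener is started once with the switcher as its handler, and the routing table is later replaced through UpdateHandler without touching the listener. The import path for the middlewares package is assumed from the repository layout; the rest mirrors the API shown above.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/containous/traefik/middlewares"
	"github.com/gorilla/mux"
)

// helloRouter builds a throwaway gorilla/mux router answering with msg.
func helloRouter(msg string) *mux.Router {
	router := mux.NewRouter()
	router.HandleFunc("/", func(rw http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(rw, msg)
	})
	return router
}

func main() {
	// The switcher is registered once as the server's handler...
	switcher := middlewares.NewHandlerSwitcher(helloRouter("config v1"))
	go http.ListenAndServe(":8000", switcher)

	// ...and a later configuration reload only swaps the router inside it,
	// so the listener and in-flight connections are left alone.
	time.Sleep(10 * time.Second)
	switcher.UpdateHandler(helloRouter("config v2"))

	select {} // keep serving
}
```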
@@ -676,7 +676,11 @@ func TestDockerLoadDockerConfig(t *testing.T) {
Ports: map[docker.Port][]docker.PortBinding{
"80/tcp": {},
},
IPAddress: "127.0.0.1",
Networks: map[string]docker.ContainerNetwork{
"bridgde": {
IPAddress: "127.0.0.1",
},
},
},
},
},
@@ -718,7 +722,11 @@ func TestDockerLoadDockerConfig(t *testing.T) {
Ports: map[docker.Port][]docker.PortBinding{
"80/tcp": {},
},
IPAddress: "127.0.0.1",
Networks: map[string]docker.ContainerNetwork{
"bridgde": {
IPAddress: "127.0.0.1",
},
},
},
},
{
@@ -732,7 +740,11 @@ func TestDockerLoadDockerConfig(t *testing.T) {
Ports: map[docker.Port][]docker.PortBinding{
"80/tcp": {},
},
IPAddress: "127.0.0.1",
Networks: map[string]docker.ContainerNetwork{
"bridgde": {
IPAddress: "127.0.0.1",
},
},
},
},
},
@@ -35,6 +35,27 @@ type KvTLS struct {
InsecureSkipVerify bool
}

func (provider *Kv) watchKv(configurationChan chan<- types.ConfigMessage, prefix string) {
for {
chanKeys, err := provider.kvclient.WatchTree(provider.Prefix, make(chan struct{}) /* stop chan */)
if err != nil {
log.Errorf("Failed to WatchTree %s", err)
continue
}

for range chanKeys {
configuration := provider.loadConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: string(provider.storeType),
Configuration: configuration,
}
}
}
log.Warnf("Intermittent failure to WatchTree KV. Retrying.")
}
}

func (provider *Kv) provide(configurationChan chan<- types.ConfigMessage) error {
storeConfig := &store.Config{
ConnectionTimeout: 30 * time.Second,
@@ -80,24 +101,7 @@ func (provider *Kv) provide(configurationChan chan<- types.ConfigMessage) error
}
provider.kvclient = kv
if provider.Watch {
stopCh := make(chan struct{})
chanKeys, err := kv.WatchTree(provider.Prefix, stopCh)
if err != nil {
return err
}
go func() {
for {
<-chanKeys
configuration := provider.loadConfig()
if configuration != nil {
configurationChan <- types.ConfigMessage{
ProviderName: string(provider.storeType),
Configuration: configuration,
}
}
defer close(stopCh)
}
}()
go provider.watchKv(configurationChan, provider.Prefix)
}
configuration := provider.loadConfig()
configurationChan <- types.ConfigMessage{
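The point of the new watchKv loop is that the KV store's WatchTree channel closes when the watch is lost, so the provider has to re-subscribe instead of returning. A minimal, dependency-free sketch of that re-subscribe pattern follows; the watch function here is only a stand-in for WatchTree, not traefik code.

```go
package main

import (
	"fmt"
	"time"
)

// watch stands in for a KV store's WatchTree: it streams events on a channel
// and closes that channel when the underlying connection breaks.
func watch() <-chan string {
	events := make(chan string)
	go func() {
		defer close(events) // closed channel == watch lost, caller must re-subscribe
		for i := 0; i < 3; i++ {
			events <- fmt.Sprintf("update-%d", i)
			time.Sleep(10 * time.Millisecond)
		}
	}()
	return events
}

func main() {
	deadline := time.After(200 * time.Millisecond)
	for {
		// Same shape as watchKv above: drain the channel, and when it closes,
		// fall out of the range loop and immediately open a new watch.
		for event := range watch() {
			fmt.Println("reloading configuration after", event)
		}
		fmt.Println("watch channel closed, re-subscribing")

		select {
		case <-deadline:
			return
		default:
		}
	}
}
```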
@@ -2,8 +2,10 @@ package provider

import (
"errors"
"github.com/containous/traefik/types"
"strings"
"testing"
"time"

"github.com/docker/libkv/store"
"reflect"
@@ -231,10 +233,60 @@ func TestKvLast(t *testing.T) {
}
}

type KvMock struct {
Kv
}

func (provider *KvMock) loadConfig() *types.Configuration {
return nil
}

func TestKvWatchTree(t *testing.T) {
returnedChans := make(chan chan []*store.KVPair)
provider := &KvMock{
Kv{
kvclient: &Mock{
WatchTreeMethod: func() <-chan []*store.KVPair {
c := make(chan []*store.KVPair, 10)
returnedChans <- c
return c
},
},
},
}

configChan := make(chan types.ConfigMessage)
go provider.watchKv(configChan, "prefix")

select {
case c1 := <-returnedChans:
c1 <- []*store.KVPair{}
<-configChan
close(c1) // WatchTree chans can close due to error
case <-time.After(1 * time.Second):
t.Fatalf("Failed to create a new WatchTree chan")
}

select {
case c2 := <-returnedChans:
c2 <- []*store.KVPair{}
<-configChan
case <-time.After(1 * time.Second):
t.Fatalf("Failed to create a new WatchTree chan")
}

select {
case _ = <-configChan:
t.Fatalf("configChan should be empty")
default:
}
}

// Extremely limited mock store so we can test initialization
type Mock struct {
Error bool
KVPairs []*store.KVPair
Error bool
KVPairs []*store.KVPair
WatchTreeMethod func() <-chan []*store.KVPair
}

func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error {
@@ -269,7 +321,7 @@ func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair,

// WatchTree mock
func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
return nil, errors.New("WatchTree not supported")
return s.WatchTreeMethod(), nil
}

// NewLock mock
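The WatchTreeMethod field is what lets TestKvWatchTree hand the provider a fresh channel on every subscription. As a generic sketch of that mock-by-function-field pattern, with purely illustrative names rather than repository code:

```go
package main

import "fmt"

// Notifier is the behaviour under test; a real implementation would talk to
// a KV store, while the mock below substitutes a function field instead.
type Notifier interface {
	Watch() <-chan string
}

type mockNotifier struct {
	// WatchMethod lets each test decide what the channel does: emit values,
	// stay silent, or close to simulate a dropped connection.
	WatchMethod func() <-chan string
}

func (m *mockNotifier) Watch() <-chan string { return m.WatchMethod() }

func main() {
	mock := &mockNotifier{
		WatchMethod: func() <-chan string {
			c := make(chan string, 1)
			c <- "event"
			close(c)
			return c
		},
	}

	var n Notifier = mock
	for event := range n.Watch() {
		fmt.Println("got", event)
	}
}
```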
@@ -17,13 +17,12 @@ import (

// Marathon holds configuration of the Marathon provider.
type Marathon struct {
BaseProvider `mapstructure:",squash"`
Endpoint string
Domain string
NetworkInterface string
Basic *MarathonBasic
TLS *tls.Config
marathonClient marathon.Marathon
BaseProvider `mapstructure:",squash"`
Endpoint string
Domain string
Basic *MarathonBasic
TLS *tls.Config
marathonClient marathon.Marathon
}

// MarathonBasic holds basic authentication specific configurations
@@ -42,7 +41,7 @@ type lightMarathonClient interface {
func (provider *Marathon) Provide(configurationChan chan<- types.ConfigMessage) error {
config := marathon.NewDefaultConfig()
config.URL = provider.Endpoint
config.EventsInterface = provider.NetworkInterface
config.EventsTransport = marathon.EventsTransportSSE
if provider.Basic != nil {
config.HTTPBasicAuthUser = provider.Basic.HTTPBasicAuthUser
config.HTTPBasicPassword = provider.Basic.HTTPBasicPassword
@@ -61,7 +60,7 @@ func (provider *Marathon) Provide(configurationChan chan<- types.ConfigMessage)
update := make(marathon.EventsChannel, 5)
if provider.Watch {
if err := client.AddEventsListener(update, marathon.EVENTS_APPLICATIONS); err != nil {
log.Errorf("Failed to register for subscriptions, %s", err)
log.Errorf("Failed to register for events, %s", err)
} else {
go func() {
for {
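With EventsInterface gone, the provider now asks the Marathon client to stream events over SSE rather than binding a callback listener on a specific network interface. The following is a rough standalone sketch of that subscription flow; the gambol99/go-marathon import path and the NewClient call are assumptions about the vendored library, while the config fields and AddEventsListener call mirror the diff above.

```go
package main

import (
	"log"

	marathon "github.com/gambol99/go-marathon" // assumed import path of the client library
)

func main() {
	// Same configuration path as Provide above, with a hard-coded endpoint.
	config := marathon.NewDefaultConfig()
	config.URL = "http://127.0.0.1:8080"
	config.EventsTransport = marathon.EventsTransportSSE

	client, err := marathon.NewClient(config)
	if err != nil {
		log.Fatalf("Failed to create Marathon client: %s", err)
	}

	// Subscribe to application events over SSE; no local network interface
	// needs to be exposed for callbacks.
	update := make(marathon.EventsChannel, 5)
	if err := client.AddEventsListener(update, marathon.EVENTS_APPLICATIONS); err != nil {
		log.Fatalf("Failed to register for events: %s", err)
	}

	for event := range update {
		log.Printf("received Marathon event: %v", event)
	}
}
```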
@@ -31,9 +31,10 @@ git push --follow-tags -u origin master

# create docker image emilevauge/traefik (compatibility)
docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
docker push ${REPO,,}:latest
docker tag ${REPO,,}:latest ${REPO,,}:${VERSION}
docker push ${REPO,,}:${VERSION}
docker tag containous/traefik emilevauge/traefik:latest
docker push emilevauge/traefik:latest
docker tag emilevauge/traefik:latest emilevauge/traefik:${VERSION}
docker push emilevauge/traefik:${VERSION}

cd ..
rm -Rf traefik-library-image/
server.go: 41 changed lines
@@ -48,7 +48,7 @@ type Server struct {

type serverEntryPoint struct {
httpServer *manners.GracefulServer
httpRouter *mux.Router
httpRouter *middlewares.HandlerSwitcher
}

// NewServer returns an initialized Server.
@@ -82,7 +82,7 @@ func (server *Server) Start() {
// Stop stops the server
func (server *Server) Stop() {
for _, serverEntryPoint := range server.serverEntryPoints {
serverEntryPoint.httpServer.Close()
serverEntryPoint.httpServer.BlockingClose()
}
server.stopChan <- true
}
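Stop() now calls BlockingClose() instead of Close(), so shutdown waits for in-flight requests to drain before returning. Below is a small sketch of that behaviour with braintree/manners, the graceful-shutdown wrapper that server.go appears to use; NewWithServer is an assumption about that library's constructor, while Close/BlockingClose are the methods shown in the diff.

```go
package main

import (
	"net/http"
	"time"

	"github.com/braintree/manners"
)

func main() {
	// NewWithServer wraps a standard http.Server in a graceful server
	// (assumed braintree/manners API).
	srv := manners.NewWithServer(&http.Server{
		Addr: ":8000",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			time.Sleep(2 * time.Second) // a slow in-flight request
			w.Write([]byte("done"))
		}),
	})

	go func() {
		time.Sleep(5 * time.Second)
		// BlockingClose, unlike Close, waits for in-flight requests to finish
		// before returning, which is why Stop() switched to it.
		srv.BlockingClose()
	}()

	srv.ListenAndServe()
}
```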
@@ -142,22 +142,23 @@ func (server *Server) listenConfigurations() {
server.serverLock.Lock()
for newServerEntryPointName, newServerEntryPoint := range newServerEntryPoints {
currentServerEntryPoint := server.serverEntryPoints[newServerEntryPointName]
server.currentConfigurations = newConfigurations
currentServerEntryPoint.httpRouter = newServerEntryPoint.httpRouter
oldServer := currentServerEntryPoint.httpServer
newsrv, err := server.prepareServer(currentServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], oldServer, server.loggerMiddleware, metrics)
if err != nil {
log.Fatal("Error preparing server: ", err)
}
go server.startServer(newsrv, server.globalConfiguration)
currentServerEntryPoint.httpServer = newsrv
server.serverEntryPoints[newServerEntryPointName] = currentServerEntryPoint
time.Sleep(1 * time.Second)
if oldServer != nil {
log.Info("Stopping old server")
oldServer.Close()
if currentServerEntryPoint.httpServer == nil {
newsrv, err := server.prepareServer(newServerEntryPoint.httpRouter, server.globalConfiguration.EntryPoints[newServerEntryPointName], nil, server.loggerMiddleware, metrics)
if err != nil {
log.Fatal("Error preparing server: ", err)
}
go server.startServer(newsrv, server.globalConfiguration)
currentServerEntryPoint.httpServer = newsrv
currentServerEntryPoint.httpRouter = newServerEntryPoint.httpRouter
server.serverEntryPoints[newServerEntryPointName] = currentServerEntryPoint
log.Infof("Created new Handler: %p", newServerEntryPoint.httpRouter.GetHandler())
} else {
handlerSwitcher := currentServerEntryPoint.httpRouter
handlerSwitcher.UpdateHandler(newServerEntryPoint.httpRouter.GetHandler())
log.Infof("Created new Handler: %p", newServerEntryPoint.httpRouter.GetHandler())
}
}
server.currentConfigurations = newConfigurations
server.serverLock.Unlock()
} else {
log.Error("Error loading new configuration, aborted ", err)
@@ -264,7 +265,7 @@ func (server *Server) startServer(srv *manners.GracefulServer, globalConfigurati
log.Info("Server stopped")
}

func (server *Server) prepareServer(router *mux.Router, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
func (server *Server) prepareServer(router http.Handler, entryPoint *EntryPoint, oldServer *manners.GracefulServer, middlewares ...negroni.Handler) (*manners.GracefulServer, error) {
log.Info("Preparing server")
// middlewares
var negroni = negroni.New()
@@ -303,7 +304,7 @@ func (server *Server) buildEntryPoints(globalConfiguration GlobalConfiguration)
for entryPointName := range globalConfiguration.EntryPoints {
router := server.buildDefaultHTTPRouter()
serverEntryPoints[entryPointName] = serverEntryPoint{
httpRouter: router,
httpRouter: middlewares.NewHandlerSwitcher(router),
}
}
return serverEntryPoints
@@ -332,7 +333,7 @@ func (server *Server) loadConfig(configurations configs, globalConfiguration Glo
if _, ok := serverEntryPoints[entryPointName]; !ok {
return nil, errors.New("Undefined entrypoint: " + entryPointName)
}
newRoute := serverEntryPoints[entryPointName].httpRouter.NewRoute().Name(frontendName)
newRoute := serverEntryPoints[entryPointName].httpRouter.GetHandler().NewRoute().Name(frontendName)
for routeName, route := range frontend.Routes {
log.Debugf("Creating route %s %s:%s", routeName, route.Rule, route.Value)
route, err := getRoute(newRoute, route.Rule, route.Value)
@@ -443,7 +444,7 @@ func (server *Server) loadEntryPointConfig(entryPointName string, entryPoint *En
if len(entryPoint.Redirect.EntryPoint) > 0 {
regex = "^(?:https?:\\/\\/)?([\\da-z\\.-]+)(?::\\d+)(.*)$"
if server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint] == nil {
return nil, errors.New("Unkown entrypoint " + entryPoint.Redirect.EntryPoint)
return nil, errors.New("Unknown entrypoint " + entryPoint.Redirect.EntryPoint)
}
protocol := "http"
if server.globalConfiguration.EntryPoints[entryPoint.Redirect.EntryPoint].TLS != nil {
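Note that prepareServer now accepts an http.Handler instead of a *mux.Router, which is exactly what allows the HandlerSwitcher to be dropped in: it only needs a ServeHTTP method. A hypothetical compile-time check one could add to make that contract explicit (not part of the diff):

```go
package middlewares

import "net/http"

// Compile-time assertion: HandlerSwitcher has ServeHTTP and therefore satisfies
// http.Handler, so it can be passed to prepareServer and wrapped by negroni.
var _ http.Handler = (*HandlerSwitcher)(nil)
```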
@@ -1,6 +1,6 @@
[backends]{{range .Containers}}
[backends.backend-{{getBackend .}}.servers.server-{{.Name | replace "/" "" | replace "." "-"}}]
url = "{{getProtocol .}}://{{.NetworkSettings.IPAddress}}:{{getPort .}}"
url = "{{getProtocol .}}://{{range $i := .NetworkSettings.Networks}}{{if $i}}{{.IPAddress}}{{end}}{{end}}:{{getPort .}}"
weight = {{getWeight .}}
{{end}}
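The template change swaps the flat .NetworkSettings.IPAddress field for a range over the .NetworkSettings.Networks map, matching the Networks field added to the docker test fixtures earlier. The following is a minimal text/template sketch of that range, with simplified stand-in types and a hard-coded protocol and port instead of traefik's getProtocol/getPort helpers:

```go
package main

import (
	"os"
	"text/template"
)

// Simplified stand-ins for go-dockerclient's network types: only the fields
// the template range actually touches.
type containerNetwork struct {
	IPAddress string
}

type networkSettings struct {
	Networks map[string]containerNetwork
}

func main() {
	// Same idea as the template change above: instead of reading a single
	// IPAddress field, range over the Networks map and emit the IP of any
	// non-empty entry.
	const tmpl = `url = "http://{{range $i := .Networks}}{{if $i}}{{.IPAddress}}{{end}}{{end}}:80"` + "\n"

	settings := networkSettings{
		Networks: map[string]containerNetwork{
			"bridge": {IPAddress: "172.17.0.2"},
		},
	}

	t := template.Must(template.New("backend").Parse(tmpl))
	if err := t.Execute(os.Stdout, settings); err != nil {
		panic(err)
	}
}
```

Executing the sketch prints `url = "http://172.17.0.2:80"`, the same backend URL shape the provider template renders.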
@@ -206,12 +206,6 @@
#
# endpoint = "http://127.0.0.1:8080"

# Network interface used to call Marathon web services. Needed in case of multiple network interfaces.
# Optional
# Default: "eth0"
#
# networkInterface = "eth0"

# Enable watch Marathon changes
#
# Optional