Compare commits


10 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Ludovic Fernandez | bdc0e3bfcf | Prepare release v1.6.5 | 2018-07-10 17:46:04 +02:00 |
| SALLEYRON Julien | f173ff02e3 | Add a mutex on local store for HTTPChallenges | 2018-07-09 23:28:02 +02:00 |
| SALLEYRON Julien | bacd58ed7b | Add logs when error is generated in error handler | 2018-07-06 10:32:03 +02:00 |
| Or Tzabary | f323df466d | Split the error handling from Consul Catalog (deadlock) | 2018-07-05 15:12:03 +02:00 |
| Fabian Beuke | b1836587f2 | Update keyFile first/last line comment in kv-config.md | 2018-07-04 14:20:03 +02:00 |
| John Yani | dbc3b85cd0 | Minor formatting issue in user-guide | 2018-06-29 17:02:03 +02:00 |
| Jean-Baptiste Doumenjou | 5eda08e9b8 | Better support on same prefix at the same level in the KV | 2018-06-26 16:18:05 +02:00 |
| Ludovic Fernandez | ec6e46e2cb | segment labels: multiple frontends for one backend. | 2018-06-22 19:44:03 +02:00 |
| Michael | aa705dd691 | Create middleware to be able to handle HTTP pipelining correctly | 2018-06-20 09:12:03 +02:00 |
| manu5801 | 1c3e4124f8 | The gandiv5 provider works with wildcard | 2018-06-18 09:26:02 +02:00 |
26 changed files with 687 additions and 350 deletions

View File

@@ -1,5 +1,21 @@
# Change Log
## [v1.6.5](https://github.com/containous/traefik/tree/v1.6.5) (2018-07-09)
[All Commits](https://github.com/containous/traefik/compare/v1.6.4...v1.6.5)
**Bug fixes:**
- **[acme]** Add a mutex on local store for HTTPChallenges ([#3579](https://github.com/containous/traefik/pull/3579) by [Juliens](https://github.com/Juliens))
- **[consulcatalog]** Split the error handling from Consul Catalog (deadlock) ([#3560](https://github.com/containous/traefik/pull/3560) by [ortz](https://github.com/ortz))
- **[docker]** segment labels: multiple frontends for one backend. ([#3511](https://github.com/containous/traefik/pull/3511) by [ldez](https://github.com/ldez))
- **[kv]** Better support on same prefix at the same level in the KV ([#3532](https://github.com/containous/traefik/pull/3532) by [jbdoumenjou](https://github.com/jbdoumenjou))
- **[logs]** Add logs when error is generated in error handler ([#3567](https://github.com/containous/traefik/pull/3567) by [Juliens](https://github.com/Juliens))
- **[middleware]** Create middleware to be able to handle HTTP pipelining correctly ([#3513](https://github.com/containous/traefik/pull/3513) by [mmatur](https://github.com/mmatur))
**Documentation:**
- **[acme]** The gandiv5 provider works with wildcard ([#3506](https://github.com/containous/traefik/pull/3506) by [manu5801](https://github.com/manu5801))
- **[kv]** Update keyFile first/last line comment in kv-config.md ([#3558](https://github.com/containous/traefik/pull/3558) by [madnight](https://github.com/madnight))
- Minor formatting issue in user-guide ([#3546](https://github.com/containous/traefik/pull/3546) by [Vanuan](https://github.com/Vanuan))
## [v1.6.4](https://github.com/containous/traefik/tree/v1.6.4) (2018-06-15)
[All Commits](https://github.com/containous/traefik/compare/v1.6.3...v1.6.4)

Gopkg.lock (generated)
View File

@@ -257,8 +257,8 @@
[[projects]]
name = "github.com/containous/staert"
packages = ["."]
revision = "cc00c303ccbd2491ddc1dccc9eb7ccadd807557e"
version = "v3.1.0"
revision = "66717a0e0ca950c4b6dc8c87b46da0b8495c6e41"
version = "v3.1.1"
[[projects]]
name = "github.com/containous/traefik-extra-service-fabric"
@@ -1217,7 +1217,7 @@
"roundrobin",
"utils"
]
revision = "d5b73186eed4aa34b52748699ad19e90f61d4059"
revision = "c2414f4542f085363f490048da2fbec5e4537eb6"
[[projects]]
name = "github.com/vulcand/predicate"
@@ -1679,6 +1679,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ac06fad81167510635546d4e5500b938d61f2eb999bf04d5520d7967b9621f0d"
inputs-digest = "ad34e6336e6f19b82c52e991d22c5b43b9144ed7dc83d7b17197583ace43f346"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -62,7 +62,7 @@
[[constraint]]
name = "github.com/containous/staert"
version = "3.1.0"
version = "3.1.1"
[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"

View File

@@ -175,7 +175,7 @@ func runCmd(globalConfiguration *configuration.GlobalConfiguration, configFile s
log.Debugf("Global configuration loaded %s", string(jsonConf))
if acme.IsEnabled() {
store := acme.NewLocalStore(acme.Get().Storage)
acme.Get().Store = &store
acme.Get().Store = store
}
svr := server.NewServer(*globalConfiguration, configuration.NewProviderAggregator(globalConfiguration))
if acme.IsEnabled() && acme.Get().OnHostRule {

View File

@@ -190,7 +190,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification,
| [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES |
| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet |
| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet |
| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | Not tested yet |
| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES |
| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet |
| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet |
| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES |

View File

@@ -85,9 +85,9 @@ defaultEntryPoints = ["http", "https"]
certFile = """-----BEGIN CERTIFICATE-----
<cert file content>
-----END CERTIFICATE-----"""
keyFile = """-----BEGIN CERTIFICATE-----
keyFile = """-----BEGIN PRIVATE KEY-----
<key file content>
-----END CERTIFICATE-----"""
-----END PRIVATE KEY-----"""
[entryPoints.other-https]
address = ":4443"
[entryPoints.other-https.tls]

View File

@@ -102,7 +102,7 @@ Let's explain this command:
| `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. |
| `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. |
| `--docker` | enable docker provider, and `--docker.swarmMode` to enable the swarm mode on Træfik. |
| `--api | activate the webUI on port 8080 |
| `--api` | activate the webUI on port 8080 |
## Deploy your apps

View File

@@ -0,0 +1,62 @@
package pipelining
import (
"bufio"
"net"
"net/http"
)
// Pipelining returns a middleware
type Pipelining struct {
next http.Handler
}
// NewPipelining returns a new Pipelining instance
func NewPipelining(next http.Handler) *Pipelining {
return &Pipelining{
next: next,
}
}
func (p *Pipelining) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
// https://github.com/golang/go/blob/3d59583836630cf13ec4bfbed977d27b1b7adbdc/src/net/http/server.go#L201-L218
if r.Method == http.MethodPut || r.Method == http.MethodPost {
p.next.ServeHTTP(rw, r)
} else {
p.next.ServeHTTP(&writerWithoutCloseNotify{rw}, r)
}
}
// writerWithoutCloseNotify helps to disable closeNotify
type writerWithoutCloseNotify struct {
W http.ResponseWriter
}
// Header returns the response headers.
func (w *writerWithoutCloseNotify) Header() http.Header {
return w.W.Header()
}
// Write writes the data to the connection as part of an HTTP reply.
func (w *writerWithoutCloseNotify) Write(buf []byte) (int, error) {
return w.W.Write(buf)
}
// WriteHeader sends an HTTP response header with the provided
// status code.
func (w *writerWithoutCloseNotify) WriteHeader(code int) {
w.W.WriteHeader(code)
}
// Flush sends any buffered data to the client.
func (w *writerWithoutCloseNotify) Flush() {
if f, ok := w.W.(http.Flusher); ok {
f.Flush()
}
}
// Hijack hijacks the connection.
func (w *writerWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return w.W.(http.Hijacker).Hijack()
}
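
For readers skimming the diff, here is a minimal sketch (not part of the changeset) of how this middleware is wired: it simply wraps the next handler, just as the server.go hunk further down does with `fwd = pipelining.NewPipelining(fwd)`. The backend handler and port below are illustrative assumptions.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/containous/traefik/middlewares/pipelining"
)

func main() {
	// Backend handler; for anything other than PUT/POST the middleware hides
	// the CloseNotifier implementation of the underlying ResponseWriter so
	// that pipelined requests are not aborted prematurely (see Go issue 23921
	// referenced in the removed oxy code further down).
	backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	// Same wiring as in server.go: fwd = pipelining.NewPipelining(fwd)
	handler := pipelining.NewPipelining(backend)

	// The port is an arbitrary example value.
	if err := http.ListenAndServe(":8080", handler); err != nil {
		panic(err)
	}
}
```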

View File

@@ -0,0 +1,69 @@
package pipelining
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
type recorderWithCloseNotify struct {
*httptest.ResponseRecorder
}
func (r *recorderWithCloseNotify) CloseNotify() <-chan bool {
panic("implement me")
}
func TestNewPipelining(t *testing.T) {
testCases := []struct {
desc string
HTTPMethod string
implementCloseNotifier bool
}{
{
desc: "should not implement CloseNotifier with GET method",
HTTPMethod: http.MethodGet,
implementCloseNotifier: false,
},
{
desc: "should implement CloseNotifier with PUT method",
HTTPMethod: http.MethodPut,
implementCloseNotifier: true,
},
{
desc: "should implement CloseNotifier with POST method",
HTTPMethod: http.MethodPost,
implementCloseNotifier: true,
},
{
desc: "should not implement CloseNotifier with GET method",
HTTPMethod: http.MethodHead,
implementCloseNotifier: false,
},
{
desc: "should not implement CloseNotifier with PROPFIND method",
HTTPMethod: "PROPFIND",
implementCloseNotifier: false,
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, ok := w.(http.CloseNotifier)
assert.Equal(t, test.implementCloseNotifier, ok)
w.WriteHeader(http.StatusOK)
})
handler := NewPipelining(nextHandler)
req := httptest.NewRequest(test.HTTPMethod, "http://localhost", nil)
handler.ServeHTTP(&recorderWithCloseNotify{httptest.NewRecorder()}, req)
})
}
}

View File

@@ -34,15 +34,9 @@ func getTokenValue(token, domain string, store Store) []byte {
var result []byte
operation := func() error {
var ok bool
httpChallenges, err := store.GetHTTPChallenges()
if err != nil {
return fmt.Errorf("HTTPChallenges not available : %s", err)
}
if result, ok = httpChallenges[token][domain]; !ok {
return fmt.Errorf("cannot find challenge for token %v", token)
}
return nil
var err error
result, err = store.GetHTTPChallengeToken(token, domain)
return err
}
notify := func(err error, time time.Duration) {
@@ -60,40 +54,9 @@ func getTokenValue(token, domain string, store Store) []byte {
}
func presentHTTPChallenge(domain, token, keyAuth string, store Store) error {
httpChallenges, err := store.GetHTTPChallenges()
if err != nil {
return fmt.Errorf("unable to get HTTPChallenges : %s", err)
}
if httpChallenges == nil {
httpChallenges = map[string]map[string][]byte{}
}
if _, ok := httpChallenges[token]; !ok {
httpChallenges[token] = map[string][]byte{}
}
httpChallenges[token][domain] = []byte(keyAuth)
return store.SaveHTTPChallenges(httpChallenges)
return store.SetHTTPChallengeToken(token, domain, []byte(keyAuth))
}
func cleanUpHTTPChallenge(domain, token string, store Store) error {
httpChallenges, err := store.GetHTTPChallenges()
if err != nil {
return fmt.Errorf("unable to get HTTPChallenges : %s", err)
}
log.Debugf("Challenge CleanUp for domain %s", domain)
if _, ok := httpChallenges[token]; ok {
if _, domainOk := httpChallenges[token][domain]; domainOk {
delete(httpChallenges[token], domain)
}
if len(httpChallenges[token]) == 0 {
delete(httpChallenges, token)
}
return store.SaveHTTPChallenges(httpChallenges)
}
return nil
return store.RemoveHTTPChallengeToken(token, domain)
}

View File

@@ -2,9 +2,11 @@ package acme
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"regexp"
"sync"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
@@ -17,11 +19,12 @@ type LocalStore struct {
filename string
storedData *StoredData
SaveDataChan chan *StoredData `json:"-"`
lock sync.RWMutex
}
// NewLocalStore initializes a new LocalStore with a file name
func NewLocalStore(filename string) LocalStore {
store := LocalStore{filename: filename, SaveDataChan: make(chan *StoredData)}
func NewLocalStore(filename string) *LocalStore {
store := &LocalStore{filename: filename, SaveDataChan: make(chan *StoredData)}
store.listenSaveAction()
return store
}
@@ -149,13 +152,59 @@ func (s *LocalStore) SaveCertificates(certificates []*Certificate) error {
return nil
}
// GetHTTPChallenges returns ACME HTTP Challenges list
func (s *LocalStore) GetHTTPChallenges() (map[string]map[string][]byte, error) {
return s.storedData.HTTPChallenges, nil
// GetHTTPChallengeToken Get the http challenge token from the store
func (s *LocalStore) GetHTTPChallengeToken(token, domain string) ([]byte, error) {
s.lock.RLock()
defer s.lock.RUnlock()
if s.storedData.HTTPChallenges == nil {
s.storedData.HTTPChallenges = map[string]map[string][]byte{}
}
if _, ok := s.storedData.HTTPChallenges[token]; !ok {
return nil, fmt.Errorf("cannot find challenge for token %v", token)
}
result, ok := s.storedData.HTTPChallenges[token][domain]
if !ok {
return nil, fmt.Errorf("cannot find challenge for token %v", token)
}
return result, nil
}
// SaveHTTPChallenges stores ACME HTTP Challenges list
func (s *LocalStore) SaveHTTPChallenges(httpChallenges map[string]map[string][]byte) error {
s.storedData.HTTPChallenges = httpChallenges
// SetHTTPChallengeToken Set the http challenge token in the store
func (s *LocalStore) SetHTTPChallengeToken(token, domain string, keyAuth []byte) error {
s.lock.Lock()
defer s.lock.Unlock()
if s.storedData.HTTPChallenges == nil {
s.storedData.HTTPChallenges = map[string]map[string][]byte{}
}
if _, ok := s.storedData.HTTPChallenges[token]; !ok {
s.storedData.HTTPChallenges[token] = map[string][]byte{}
}
s.storedData.HTTPChallenges[token][domain] = []byte(keyAuth)
return nil
}
// RemoveHTTPChallengeToken Remove the http challenge token in the store
func (s *LocalStore) RemoveHTTPChallengeToken(token, domain string) error {
s.lock.Lock()
defer s.lock.Unlock()
if s.storedData.HTTPChallenges == nil {
return nil
}
if _, ok := s.storedData.HTTPChallenges[token]; ok {
if _, domainOk := s.storedData.HTTPChallenges[token][domain]; domainOk {
delete(s.storedData.HTTPChallenges[token], domain)
}
if len(s.storedData.HTTPChallenges[token]) == 0 {
delete(s.storedData.HTTPChallenges, token)
}
}
return nil
}

View File

@@ -13,6 +13,7 @@ type Store interface {
SaveAccount(*Account) error
GetCertificates() ([]*Certificate, error)
SaveCertificates([]*Certificate) error
GetHTTPChallenges() (map[string]map[string][]byte, error)
SaveHTTPChallenges(map[string]map[string][]byte) error
GetHTTPChallengeToken(token, domain string) ([]byte, error)
SetHTTPChallengeToken(token, domain string, keyAuth []byte) error
RemoveHTTPChallengeToken(token, domain string) error
}
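
As an aside, the sketch below is a minimal in-memory implementation of the three new token methods only; it mirrors the RWMutex-guarded nested map this release adds to LocalStore, but the type name is made up and the remaining Store methods are omitted.

```go
package main

import (
	"fmt"
	"sync"
)

// memoryChallengeStore is an illustrative in-memory stand-in; it is not part
// of the Traefik code base.
type memoryChallengeStore struct {
	lock       sync.RWMutex
	challenges map[string]map[string][]byte // token -> domain -> keyAuth
}

func (s *memoryChallengeStore) GetHTTPChallengeToken(token, domain string) ([]byte, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()

	if result, ok := s.challenges[token][domain]; ok {
		return result, nil
	}
	return nil, fmt.Errorf("cannot find challenge for token %v", token)
}

func (s *memoryChallengeStore) SetHTTPChallengeToken(token, domain string, keyAuth []byte) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	if s.challenges == nil {
		s.challenges = map[string]map[string][]byte{}
	}
	if _, ok := s.challenges[token]; !ok {
		s.challenges[token] = map[string][]byte{}
	}
	s.challenges[token][domain] = keyAuth
	return nil
}

func (s *memoryChallengeStore) RemoveHTTPChallengeToken(token, domain string) error {
	s.lock.Lock()
	defer s.lock.Unlock()

	delete(s.challenges[token], domain)
	if len(s.challenges[token]) == 0 {
		delete(s.challenges, token)
	}
	return nil
}

func main() {
	store := &memoryChallengeStore{}
	_ = store.SetHTTPChallengeToken("token", "example.com", []byte("keyAuth"))
	keyAuth, err := store.GetHTTPChallengeToken("token", "example.com")
	fmt.Println(string(keyAuth), err)
}
```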

View File

@@ -1,7 +1,6 @@
package consulcatalog
import (
"errors"
"fmt"
"strconv"
"strings"
@@ -155,14 +154,8 @@ func (p *Provider) watch(configurationChan chan<- types.ConfigMessage, stop chan
defer close(stopCh)
defer close(watchCh)
for {
select {
case <-stop:
return nil
case index, ok := <-watchCh:
if !ok {
return errors.New("consul service list nil")
}
safe.Go(func() {
for index := range watchCh {
log.Debug("List of services changed")
nodes, err := p.getNodes(index)
if err != nil {
@@ -173,6 +166,13 @@ func (p *Provider) watch(configurationChan chan<- types.ConfigMessage, stop chan
ProviderName: "consul_catalog",
Configuration: configuration,
}
}
})
for {
select {
case <-stop:
return nil
case err := <-errorCh:
return err
}
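
The hunk above moves consumption of the watch channel into its own goroutine, so the outer loop only waits on a stop signal or an error. A self-contained sketch of that pattern, with illustrative channel names and payload type, might look like this:

```go
package main

import (
	"errors"
	"fmt"
)

// watchLoop sketches the pattern the hunk introduces: the watch channel is
// drained in a dedicated goroutine, and the outer select only waits for a
// stop signal or an error, so producer and consumer can no longer deadlock
// on the same select. In Traefik the goroutine re-reads the Consul nodes and
// publishes a ConfigMessage; here it just prints the index.
func watchLoop(watchCh <-chan uint64, stop <-chan struct{}) error {
	errorCh := make(chan error, 1)

	go func() {
		for index := range watchCh {
			fmt.Println("list of services changed, index:", index)
		}
		// Surface the closed channel as an error, like the removed
		// "consul service list nil" branch used to.
		errorCh <- errors.New("watch channel closed")
	}()

	select {
	case <-stop:
		return nil
	case err := <-errorCh:
		return err
	}
}

func main() {
	watchCh := make(chan uint64, 1)
	stop := make(chan struct{})

	watchCh <- 1
	close(watchCh)

	// With the channel closed, the consumer goroutine drains it and the
	// error is surfaced through errorCh.
	fmt.Println(watchLoop(watchCh, stop))
}
```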

View File

@@ -2,6 +2,8 @@ package docker
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"net"
"strconv"
@@ -107,13 +109,11 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
}
func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string {
serviceNameKey := container.ServiceName
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); !swarmMode && err == nil {
serviceNameKey = values[labelDockerComposeService] + values[labelDockerComposeProject]
if swarmMode {
return container.ServiceName + segmentName
}
return serviceNameKey + segmentName
return getServiceName(container) + segmentName
}
func (p *Provider) containerFilter(container dockerData) bool {
@@ -170,7 +170,7 @@ func checkSegmentPort(labels map[string]string, segmentName string) error {
func (p *Provider) getFrontendName(container dockerData, idx int) string {
var name string
if len(container.SegmentName) > 0 {
name = getBackendName(container)
name = container.SegmentName + "-" + getBackendName(container)
} else {
name = p.getFrontendRule(container, container.SegmentLabels) + "-" + strconv.Itoa(idx)
}
@@ -262,17 +262,21 @@ func isBackendLBSwarm(container dockerData) bool {
return label.GetBoolValue(container.Labels, labelBackendLoadBalancerSwarm, false)
}
func getSegmentBackendName(container dockerData) string {
serviceName := container.ServiceName
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
serviceName = provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject])
func getBackendName(container dockerData) string {
if len(container.SegmentName) > 0 {
return getSegmentBackendName(container)
}
return getDefaultBackendName(container)
}
func getSegmentBackendName(container dockerData) string {
serviceName := getServiceName(container)
if value := label.GetStringValue(container.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
return provider.Normalize(serviceName + "-" + value)
}
return provider.Normalize(serviceName + "-" + getDefaultBackendName(container) + "-" + container.SegmentName)
return provider.Normalize(serviceName + "-" + container.SegmentName)
}
func getDefaultBackendName(container dockerData) string {
@@ -280,19 +284,17 @@ func getDefaultBackendName(container dockerData) string {
return provider.Normalize(value)
}
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
return provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject])
}
return provider.Normalize(container.ServiceName)
return provider.Normalize(getServiceName(container))
}
func getBackendName(container dockerData) string {
if len(container.SegmentName) > 0 {
return getSegmentBackendName(container)
func getServiceName(container dockerData) string {
serviceName := container.ServiceName
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]
}
return getDefaultBackendName(container)
return serviceName
}
func getPort(container dockerData) string {
@@ -322,7 +324,7 @@ func getPort(container dockerData) string {
func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
var servers map[string]types.Server
for i, container := range containers {
for _, container := range containers {
ip := p.getIPAddress(container)
if len(ip) == 0 {
log.Warnf("Unable to find the IP address for the container %q: the server is ignored.", container.Name)
@@ -336,16 +338,30 @@ func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
protocol := label.GetStringValue(container.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
port := getPort(container)
serverName := "server-" + container.SegmentName + "-" + container.Name
if len(container.SegmentName) > 0 {
serverName += "-" + strconv.Itoa(i)
serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port))
serverName := getServerName(container.Name, serverURL)
if _, exist := servers[serverName]; exist {
log.Debugf("Skipping server %q with the same URL.", serverName)
continue
}
servers[provider.Normalize(serverName)] = types.Server{
URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)),
servers[serverName] = types.Server{
URL: serverURL,
Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
}
}
return servers
}
func getServerName(containerName, url string) string {
hash := md5.New()
_, err := hash.Write([]byte(url))
if err != nil {
// Impossible case
log.Errorf("Fail to hash server URL %q", url)
}
return provider.Normalize("server-" + containerName + "-" + hex.EncodeToString(hash.Sum(nil)))
}
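
A standalone sketch of the idea behind the new getServerName helper follows; the helper name is our own, and Traefik's provider.Normalize step is omitted.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// serverName derives the server key from the container name plus an MD5 hash
// of the server URL, so identical URLs collapse into a single server entry
// while different ports stay distinct.
func serverName(containerName, url string) string {
	hash := md5.Sum([]byte(url))
	return "server-" + containerName + "-" + hex.EncodeToString(hash[:])
}

func main() {
	// For container "test" and this URL the updated tests expect
	// "server-test-842895ca2aca17f6ee36ddb2f621194d".
	fmt.Println(serverName("test", "http://127.0.0.1:80"))
}
```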

View File

@@ -55,7 +55,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-test": {
Servers: map[string]types.Server{
"server-test": {
"server-test-842895ca2aca17f6ee36ddb2f621194d": {
URL: "http://127.0.0.1:80",
Weight: label.DefaultWeight,
},
@@ -270,7 +270,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-foobar": {
Servers: map[string]types.Server{
"server-test1": {
"server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": {
URL: "https://127.0.0.1:666",
Weight: 12,
},
@@ -372,10 +372,11 @@ func TestDockerBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-myService-myProject": {
Servers: map[string]types.Server{
"server-test-0": {
"server-test-0-842895ca2aca17f6ee36ddb2f621194d": {
URL: "http://127.0.0.1:80",
Weight: label.DefaultWeight,
}, "server-test-1": {
},
"server-test-1-48093b9fc43454203aacd2bc4057a08c": {
URL: "http://127.0.0.2:80",
Weight: label.DefaultWeight,
},
@@ -384,7 +385,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
},
"backend-myService2-myProject": {
Servers: map[string]types.Server{
"server-test-2": {
"server-test-2-405767e9733427148cd8dae6c4d331b0": {
URL: "http://127.0.0.3:80",
Weight: label.DefaultWeight,
},
@@ -1055,7 +1056,7 @@ func TestDockerGetServers(t *testing.T) {
})),
},
expected: map[string]types.Server{
"server-test1": {
"server-test1-fb00f762970935200c76ccdaf91458f6": {
URL: "http://10.10.10.10:80",
Weight: 1,
},
@@ -1084,15 +1085,15 @@ func TestDockerGetServers(t *testing.T) {
})),
},
expected: map[string]types.Server{
"server-test1": {
"server-test1-743440b6f4a8ffd8737626215f2c5a33": {
URL: "http://10.10.10.11:80",
Weight: 1,
},
"server-test2": {
"server-test2-547f74bbb5da02b6c8141ce9aa96c13b": {
URL: "http://10.10.10.12:81",
Weight: 1,
},
"server-test3": {
"server-test3-c57fd8b848c814a3f2a4a4c12e13c179": {
URL: "http://10.10.10.13:82",
Weight: 1,
},
@@ -1121,11 +1122,11 @@ func TestDockerGetServers(t *testing.T) {
})),
},
expected: map[string]types.Server{
"server-test2": {
"server-test2-547f74bbb5da02b6c8141ce9aa96c13b": {
URL: "http://10.10.10.12:81",
Weight: 1,
},
"server-test3": {
"server-test3-c57fd8b848c814a3f2a4a4c12e13c179": {
URL: "http://10.10.10.13:82",
Weight: 1,
},

View File

@@ -57,7 +57,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-test": {
Servers: map[string]types.Server{
"server-test": {
"server-test-842895ca2aca17f6ee36ddb2f621194d": {
URL: "http://127.0.0.1:80",
Weight: label.DefaultWeight,
},
@@ -238,7 +238,6 @@ func TestSwarmBuildConfiguration(t *testing.T) {
ReferrerPolicy: "foo",
IsDevelopment: true,
},
Errors: map[string]*types.ErrorPage{
"foo": {
Status: []string{"404"},
@@ -276,7 +275,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-foobar": {
Servers: map[string]types.Server{
"server-test1": {
"server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": {
URL: "https://127.0.0.1:666",
Weight: 12,
},

View File

@@ -42,22 +42,22 @@ func TestSegmentBuildConfiguration(t *testing.T) {
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-foo-foo-sauternes": {
Backend: "backend-foo-foo-sauternes",
"frontend-sauternes-foo-sauternes": {
Backend: "backend-foo-sauternes",
PassHostHeader: true,
EntryPoints: []string{"http", "https"},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-foo-foo-sauternes": {
"route-frontend-sauternes-foo-sauternes": {
Rule: "Host:foo.docker.localhost",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-foo-foo-sauternes": {
"backend-foo-sauternes": {
Servers: map[string]types.Server{
"server-sauternes-foo-0": {
"server-foo-863563a2e23c95502862016417ee95ea": {
URL: "http://127.0.0.1:2503",
Weight: label.DefaultWeight,
},
@@ -132,8 +132,8 @@ func TestSegmentBuildConfiguration(t *testing.T) {
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-foo-foo-sauternes": {
Backend: "backend-foo-foo-sauternes",
"frontend-sauternes-foo-sauternes": {
Backend: "backend-foo-sauternes",
EntryPoints: []string{
"http",
"https",
@@ -224,16 +224,16 @@ func TestSegmentBuildConfiguration(t *testing.T) {
},
Routes: map[string]types.Route{
"route-frontend-foo-foo-sauternes": {
"route-frontend-sauternes-foo-sauternes": {
Rule: "Host:foo.docker.localhost",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-foo-foo-sauternes": {
"backend-foo-sauternes": {
Servers: map[string]types.Server{
"server-sauternes-foo-0": {
"server-foo-7f6444e0dff3330c8b0ad2bbbd383b0f": {
URL: "https://127.0.0.1:666",
Weight: 12,
},
@@ -278,7 +278,7 @@ func TestSegmentBuildConfiguration(t *testing.T) {
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-test1-foobar": {
"frontend-sauternes-test1-foobar": {
Backend: "backend-test1-foobar",
PassHostHeader: false,
Priority: 5000,
@@ -288,18 +288,18 @@ func TestSegmentBuildConfiguration(t *testing.T) {
EntryPoint: "https",
},
Routes: map[string]types.Route{
"route-frontend-test1-foobar": {
"route-frontend-sauternes-test1-foobar": {
Rule: "Path:/mypath",
},
},
},
"frontend-test2-test2-anothersauternes": {
Backend: "backend-test2-test2-anothersauternes",
"frontend-anothersauternes-test2-anothersauternes": {
Backend: "backend-test2-anothersauternes",
PassHostHeader: true,
EntryPoints: []string{},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-test2-test2-anothersauternes": {
"route-frontend-anothersauternes-test2-anothersauternes": {
Rule: "Path:/anotherpath",
},
},
@@ -308,16 +308,16 @@ func TestSegmentBuildConfiguration(t *testing.T) {
expectedBackends: map[string]*types.Backend{
"backend-test1-foobar": {
Servers: map[string]types.Server{
"server-sauternes-test1-0": {
"server-test1-79533a101142718f0fdf84c42593c41e": {
URL: "https://127.0.0.1:2503",
Weight: 80,
},
},
CircuitBreaker: nil,
},
"backend-test2-test2-anothersauternes": {
"backend-test2-anothersauternes": {
Servers: map[string]types.Server{
"server-anothersauternes-test2-0": {
"server-test2-e9c1b66f9af919aa46053fbc2391bb4a": {
URL: "http://127.0.0.1:8079",
Weight: 33,
},
@@ -326,6 +326,152 @@ func TestSegmentBuildConfiguration(t *testing.T) {
},
},
},
{
desc: "several segments with the same backend name and same port",
containers: []docker.ContainerJSON{
containerJSON(
name("test1"),
labels(map[string]string{
"traefik.port": "2503",
"traefik.protocol": "https",
"traefik.weight": "80",
"traefik.frontend.entryPoints": "http,https",
"traefik.frontend.redirect.entryPoint": "https",
"traefik.sauternes.backend": "foobar",
"traefik.sauternes.frontend.rule": "Path:/sauternes",
"traefik.sauternes.frontend.priority": "5000",
"traefik.arbois.backend": "foobar",
"traefik.arbois.frontend.rule": "Path:/arbois",
"traefik.arbois.frontend.priority": "3000",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.1")),
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-sauternes-test1-foobar": {
Backend: "backend-test1-foobar",
PassHostHeader: true,
Priority: 5000,
EntryPoints: []string{"http", "https"},
BasicAuth: []string{},
Redirect: &types.Redirect{
EntryPoint: "https",
},
Routes: map[string]types.Route{
"route-frontend-sauternes-test1-foobar": {
Rule: "Path:/sauternes",
},
},
},
"frontend-arbois-test1-foobar": {
Backend: "backend-test1-foobar",
PassHostHeader: true,
Priority: 3000,
EntryPoints: []string{"http", "https"},
BasicAuth: []string{},
Redirect: &types.Redirect{
EntryPoint: "https",
},
Routes: map[string]types.Route{
"route-frontend-arbois-test1-foobar": {
Rule: "Path:/arbois",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-test1-foobar": {
Servers: map[string]types.Server{
"server-test1-79533a101142718f0fdf84c42593c41e": {
URL: "https://127.0.0.1:2503",
Weight: 80,
},
},
CircuitBreaker: nil,
},
},
},
{
desc: "several segments with the same backend name and different port (wrong behavior)",
containers: []docker.ContainerJSON{
containerJSON(
name("test1"),
labels(map[string]string{
"traefik.protocol": "https",
"traefik.frontend.entryPoints": "http,https",
"traefik.frontend.redirect.entryPoint": "https",
"traefik.sauternes.port": "2503",
"traefik.sauternes.weight": "80",
"traefik.sauternes.backend": "foobar",
"traefik.sauternes.frontend.rule": "Path:/sauternes",
"traefik.sauternes.frontend.priority": "5000",
"traefik.arbois.port": "2504",
"traefik.arbois.weight": "90",
"traefik.arbois.backend": "foobar",
"traefik.arbois.frontend.rule": "Path:/arbois",
"traefik.arbois.frontend.priority": "3000",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.1")),
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-sauternes-test1-foobar": {
Backend: "backend-test1-foobar",
PassHostHeader: true,
Priority: 5000,
EntryPoints: []string{"http", "https"},
BasicAuth: []string{},
Redirect: &types.Redirect{
EntryPoint: "https",
},
Routes: map[string]types.Route{
"route-frontend-sauternes-test1-foobar": {
Rule: "Path:/sauternes",
},
},
},
"frontend-arbois-test1-foobar": {
Backend: "backend-test1-foobar",
PassHostHeader: true,
Priority: 3000,
EntryPoints: []string{"http", "https"},
BasicAuth: []string{},
Redirect: &types.Redirect{
EntryPoint: "https",
},
Routes: map[string]types.Route{
"route-frontend-arbois-test1-foobar": {
Rule: "Path:/arbois",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-test1-foobar": {
Servers: map[string]types.Server{
"server-test1-79533a101142718f0fdf84c42593c41e": {
URL: "https://127.0.0.1:2503",
Weight: 80,
},
"server-test1-315a41140f1bd825b066e39686c18482": {
URL: "https://127.0.0.1:2504",
Weight: 90,
},
},
CircuitBreaker: nil,
},
},
},
}
provider := &Provider{

View File

@@ -5,6 +5,7 @@ import (
"net"
"net/http"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares"
)
@@ -37,4 +38,5 @@ func (eh *RecordingErrorHandler) ServeHTTP(w http.ResponseWriter, req *http.Requ
w.WriteHeader(statusCode)
w.Write([]byte(http.StatusText(statusCode)))
log.Debugf("'%d %s' caused by: %v", statusCode, http.StatusText(statusCode), err)
}

View File

@@ -32,6 +32,7 @@ import (
"github.com/containous/traefik/middlewares/accesslog"
mauth "github.com/containous/traefik/middlewares/auth"
"github.com/containous/traefik/middlewares/errorpages"
"github.com/containous/traefik/middlewares/pipelining"
"github.com/containous/traefik/middlewares/redirect"
"github.com/containous/traefik/middlewares/tracing"
"github.com/containous/traefik/provider"
@@ -1023,6 +1024,8 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
})
}
fwd = pipelining.NewPipelining(fwd)
var rr *roundrobin.RoundRobin
var saveFrontend http.Handler
if s.accessLoggerMiddleware != nil {

View File

@@ -46,16 +46,16 @@ func (kv *KvSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
// LoadConfig loads data from the KV Store into the config structure (given by reference)
func (kv *KvSource) LoadConfig(config interface{}) error {
pairs := map[string][]byte{}
if err := kv.ListRecursive(kv.Prefix, pairs); err != nil {
pairs, err := kv.ListValuedPairWithPrefix(kv.Prefix)
if err != nil {
return err
}
// fmt.Printf("pairs : %#v\n", pairs)
mapStruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix)
if err != nil {
return err
}
// fmt.Printf("mapStruct : %#v\n", mapStruct)
configDecoder := &mapstructure.DecoderConfig{
Metadata: nil,
Result: config,
@@ -77,11 +77,11 @@ func generateMapstructure(pairs []*store.KVPair, prefix string) (map[string]inte
for _, p := range pairs {
// Trim the prefix off our key first
key := strings.TrimPrefix(strings.Trim(p.Key, "/"), strings.Trim(prefix, "/")+"/")
raw, err := processKV(key, p.Value, raw)
var err error
raw, err = processKV(key, p.Value, raw)
if err != nil {
return raw, err
}
}
return raw, nil
}
@@ -313,15 +313,23 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
func writeCompressedData(data []byte) (string, error) {
var buffer bytes.Buffer
gzipWriter := gzip.NewWriter(&buffer)
_, err := gzipWriter.Write(data)
if err != nil {
return "", err
}
gzipWriter.Close()
err = gzipWriter.Close()
if err != nil {
return "", err
}
return buffer.String(), nil
}
// ListRecursive lists all key value children under key
// Replaced by ListValuedPairWithPrefix
// Deprecated
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
pairsN1, err := kv.List(key, nil)
if err == store.ErrKeyNotFound {
@@ -342,14 +350,37 @@ func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
return nil
}
for _, p := range pairsN1 {
err := kv.ListRecursive(p.Key, pairs)
if err != nil {
return err
if p.Key != key {
err := kv.ListRecursive(p.Key, pairs)
if err != nil {
return err
}
}
}
return nil
}
// ListValuedPairWithPrefix lists all key value children under key
func (kv *KvSource) ListValuedPairWithPrefix(key string) (map[string][]byte, error) {
pairs := make(map[string][]byte)
pairsN1, err := kv.List(key, nil)
if err == store.ErrKeyNotFound {
return pairs, nil
}
if err != nil {
return pairs, err
}
for _, p := range pairsN1 {
if len(p.Value) > 0 {
pairs[p.Key] = p.Value
}
}
return pairs, nil
}
func convertPairs(pairs map[string][]byte) []*store.KVPair {
slicePairs := make([]*store.KVPair, len(pairs))
i := 0
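
The intent of the new ListValuedPairWithPrefix is to list everything under a prefix in one pass and keep only the entries that actually carry a value, so "directory" keys that merely share a prefix with real keys at the same level no longer shadow each other. A rough, store-agnostic illustration, with a plain map standing in for the KV backend:

```go
package main

import (
	"fmt"
	"strings"
)

// listValuedPairsWithPrefix keeps only prefixed entries with a non-empty
// value; this is an illustration of the intent, not the Traefik code.
func listValuedPairsWithPrefix(all map[string][]byte, prefix string) map[string][]byte {
	pairs := make(map[string][]byte)
	for k, v := range all {
		if strings.HasPrefix(k, prefix) && len(v) > 0 {
			pairs[k] = v
		}
	}
	return pairs
}

func main() {
	all := map[string][]byte{
		"traefik/backends/backend1/servers/server1/url": []byte("http://127.0.0.1:80"),
		"traefik/backends/backend1/servers/server1":     nil, // directory node, no value
		"traefik/backends/backend1":                     nil, // directory node, no value
	}
	fmt.Println(listValuedPairsWithPrefix(all, "traefik"))
}
```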

View File

@@ -2,12 +2,8 @@ package staert
import (
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"github.com/BurntSushi/toml"
"github.com/containous/flaeg"
)
@@ -24,10 +20,7 @@ type Staert struct {
// NewStaert creates and return a pointer on Staert. Need defaultConfig and defaultPointersConfig given by references
func NewStaert(rootCommand *flaeg.Command) *Staert {
s := Staert{
command: rootCommand,
}
return &s
return &Staert{command: rootCommand}
}
// AddSource adds new Source to Staert, give it by reference
@@ -35,40 +28,31 @@ func (s *Staert) AddSource(src Source) {
s.sources = append(s.sources, src)
}
// getConfig for a flaeg.Command run sources Parse func in the raw
func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error {
for _, src := range s.sources {
var err error
_, err = src.Parse(cmd)
if err != nil {
return err
}
}
return nil
}
// LoadConfig check which command is called and parses config
// It returns the the parsed config or an error if it fails
func (s *Staert) LoadConfig() (interface{}, error) {
for _, src := range s.sources {
//Type assertion
f, ok := src.(*flaeg.Flaeg)
if ok {
if fCmd, err := f.GetCommand(); err != nil {
// Type assertion
if flg, ok := src.(*flaeg.Flaeg); ok {
fCmd, err := flg.GetCommand()
if err != nil {
return nil, err
} else if s.command != fCmd {
//IF fleag sub-command
}
// if fleag sub-command
if s.command != fCmd {
// if parseAllSources
if fCmd.Metadata["parseAllSources"] == "true" {
//IF parseAllSources
fCmdConfigType := reflect.TypeOf(fCmd.Config)
sCmdConfigType := reflect.TypeOf(s.command.Config)
if fCmdConfigType != sCmdConfigType {
return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s",
fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
}
s.command = fCmd
} else {
// ELSE (not parseAllSources)
s.command, err = f.Parse(fCmd)
// (not parseAllSources)
s.command, err = flg.Parse(fCmd)
return s.command.Config, err
}
}
@@ -78,117 +62,19 @@ func (s *Staert) LoadConfig() (interface{}, error) {
return s.command.Config, err
}
// parseConfigAllSources getConfig for a flaeg.Command run sources Parse func in the raw
func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error {
for _, src := range s.sources {
_, err := src.Parse(cmd)
if err != nil {
return err
}
}
return nil
}
// Run calls the Run func of the command
// Warning, Run doesn't parse the config
func (s *Staert) Run() error {
return s.command.Run()
}
//TomlSource impement Source
type TomlSource struct {
filename string
dirNfullpath []string
fullpath string
}
// NewTomlSource creates and return a pointer on TomlSource.
// Parameter filename is the file name (without extension type, ".toml" will be added)
// dirNfullpath may contain directories or fullpath to the file.
func NewTomlSource(filename string, dirNfullpath []string) *TomlSource {
return &TomlSource{filename, dirNfullpath, ""}
}
// ConfigFileUsed return config file used
func (ts *TomlSource) ConfigFileUsed() string {
return ts.fullpath
}
func preprocessDir(dirIn string) (string, error) {
dirOut := dirIn
expanded := os.ExpandEnv(dirIn)
dirOut, err := filepath.Abs(expanded)
return dirOut, err
}
func findFile(filename string, dirNfile []string) string {
for _, df := range dirNfile {
if df != "" {
fullPath, _ := preprocessDir(df)
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
return fullPath
}
fullPath = filepath.Join(fullPath, filename+".toml")
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
return fullPath
}
}
}
return ""
}
// Parse calls toml.DecodeFile() func
func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
ts.fullpath = findFile(ts.filename, ts.dirNfullpath)
if len(ts.fullpath) < 2 {
return cmd, nil
}
metadata, err := toml.DecodeFile(ts.fullpath, cmd.Config)
if err != nil {
return nil, err
}
boolFlags, err := flaeg.GetBoolFlags(cmd.Config)
if err != nil {
return nil, err
}
flaegArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
if err != nil {
return nil, err
}
// fmt.Println(flaegArgs)
err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flaegArgs)
//if err!= missing parser err
if err != nil && err != flaeg.ErrParserNotFound {
return nil, err
}
if hasUnderField {
_, err := toml.DecodeFile(ts.fullpath, cmd.Config)
if err != nil {
return nil, err
}
}
return cmd, nil
}
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
var flaegArgs []string
keys := metadata.Keys()
hasUnderField := false
for i, key := range keys {
// fmt.Println(key)
if metadata.Type(key.String()) == "Hash" {
// TOML hashes correspond to Go structs or maps.
// fmt.Printf("%s could be a ptr on a struct, or a map\n", key)
for j := i; j < len(keys); j++ {
// fmt.Printf("%s =? %s\n", keys[j].String(), "."+key.String())
if strings.Contains(keys[j].String(), key.String()+".") {
hasUnderField = true
break
}
}
match := false
for _, flag := range flags {
if flag == strings.ToLower(key.String()) {
match = true
break
}
}
if match {
flaegArgs = append(flaegArgs, "--"+strings.ToLower(key.String()))
}
}
}
return flaegArgs, hasUnderField, nil
}

vendor/github.com/containous/staert/toml.go (new file, generated, vendored)
View File

@@ -0,0 +1,121 @@
package staert
import (
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
"github.com/containous/flaeg"
)
var _ Source = (*TomlSource)(nil)
// TomlSource implement staert.Source
type TomlSource struct {
filename string
dirNFullPath []string
fullPath string
}
// NewTomlSource creates and return a pointer on Source.
// Parameter filename is the file name (without extension type, ".toml" will be added)
// dirNFullPath may contain directories or fullPath to the file.
func NewTomlSource(filename string, dirNFullPath []string) *TomlSource {
return &TomlSource{filename, dirNFullPath, ""}
}
// ConfigFileUsed return config file used
func (ts *TomlSource) ConfigFileUsed() string {
return ts.fullPath
}
// Parse calls toml.DecodeFile() func
func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
ts.fullPath = findFile(ts.filename, ts.dirNFullPath)
if len(ts.fullPath) < 2 {
return cmd, nil
}
metadata, err := toml.DecodeFile(ts.fullPath, cmd.Config)
if err != nil {
return nil, err
}
boolFlags, err := flaeg.GetBoolFlags(cmd.Config)
if err != nil {
return nil, err
}
flgArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
if err != nil {
return nil, err
}
err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flgArgs)
if err != nil && err != flaeg.ErrParserNotFound {
return nil, err
}
if hasUnderField {
_, err := toml.DecodeFile(ts.fullPath, cmd.Config)
if err != nil {
return nil, err
}
}
return cmd, nil
}
func preProcessDir(dirIn string) (string, error) {
expanded := os.ExpandEnv(dirIn)
return filepath.Abs(expanded)
}
func findFile(filename string, dirNFile []string) string {
for _, df := range dirNFile {
if df != "" {
fullPath, _ := preProcessDir(df)
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
return fullPath
}
fullPath = filepath.Join(fullPath, filename+".toml")
if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
return fullPath
}
}
}
return ""
}
func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
var flgArgs []string
keys := metadata.Keys()
hasUnderField := false
for i, key := range keys {
if metadata.Type(key.String()) == "Hash" {
// TOML hashes correspond to Go structs or maps.
for j := i; j < len(keys); j++ {
if strings.Contains(keys[j].String(), key.String()+".") {
hasUnderField = true
break
}
}
match := false
for _, flag := range flags {
if flag == strings.ToLower(key.String()) {
match = true
break
}
}
if match {
flgArgs = append(flgArgs, "--"+strings.ToLower(key.String()))
}
}
}
return flgArgs, hasUnderField, nil
}

View File

@@ -156,7 +156,7 @@ func (c *CircuitBreaker) activateFallback(w http.ResponseWriter, req *http.Reque
func (c *CircuitBreaker) serve(w http.ResponseWriter, req *http.Request) {
start := c.clock.UtcNow()
p := utils.NewSimpleProxyWriter(w)
p := utils.NewProxyWriter(w)
c.next.ServeHTTP(p, req)

View File

@@ -466,16 +466,6 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct
defer logEntry.Debug("vulcand/oxy/forward/http: completed ServeHttp on request")
}
var pw utils.ProxyWriter
// Disable closeNotify when method GET for http pipelining
// Waiting for https://github.com/golang/go/issues/23921
if inReq.Method == http.MethodGet {
pw = utils.NewProxyWriterWithoutCloseNotify(w)
} else {
pw = utils.NewSimpleProxyWriter(w)
}
start := time.Now().UTC()
outReq := new(http.Request)
@@ -490,18 +480,24 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct
ModifyResponse: f.modifyResponse,
BufferPool: f.bufferPool,
}
revproxy.ServeHTTP(pw, outReq)
if inReq.TLS != nil {
f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v",
inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start),
inReq.TLS.Version,
inReq.TLS.DidResume,
inReq.TLS.CipherSuite,
inReq.TLS.ServerName)
if f.log.GetLevel() >= log.DebugLevel {
pw := utils.NewProxyWriter(w)
revproxy.ServeHTTP(pw, outReq)
if inReq.TLS != nil {
f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v",
inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start),
inReq.TLS.Version,
inReq.TLS.DidResume,
inReq.TLS.CipherSuite,
inReq.TLS.ServerName)
} else {
f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v",
inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start))
}
} else {
f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v",
inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start))
revproxy.ServeHTTP(w, outReq)
}
}
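
The rewritten hunk above only wraps the ResponseWriter in a ProxyWriter when debug logging is enabled; otherwise the original writer goes straight to the reverse proxy. A self-contained sketch of that shape using logrus, where the wrapper type and handler are stand-ins rather than oxy's types:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	log "github.com/sirupsen/logrus"
)

// countingWriter is an illustrative stand-in for oxy's ProxyWriter: it only
// records the status code and body length.
type countingWriter struct {
	http.ResponseWriter
	code   int
	length int64
}

func (c *countingWriter) WriteHeader(code int) {
	c.code = code
	c.ResponseWriter.WriteHeader(code)
}

func (c *countingWriter) Write(b []byte) (int, error) {
	n, err := c.ResponseWriter.Write(b)
	c.length += int64(n)
	return n, err
}

// serveAndMaybeTrace mirrors the shape of the patched forwarder: the writer
// is wrapped only when debug logging is enabled, so in the common case the
// original ResponseWriter reaches the next handler untouched.
func serveAndMaybeTrace(next http.Handler, w http.ResponseWriter, r *http.Request) {
	if log.GetLevel() >= log.DebugLevel {
		cw := &countingWriter{ResponseWriter: w}
		next.ServeHTTP(cw, r)
		log.Debugf("round trip: %v, code: %v, length: %v", r.URL, cw.code, cw.length)
		return
	}
	next.ServeHTTP(w, r)
}

func main() {
	log.SetLevel(log.DebugLevel)

	next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "http://localhost/", nil)
	serveAndMaybeTrace(next, rec, req)
}
```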

View File

@@ -148,7 +148,7 @@ func (rb *Rebalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
defer logEntry.Debug("vulcand/oxy/roundrobin/rebalancer: completed ServeHttp on request")
}
pw := utils.NewSimpleProxyWriter(w)
pw := utils.NewProxyWriter(w)
start := rb.clock.UtcNow()
// make shallow copy of request before changing anything to avoid side effects

View File

@@ -12,89 +12,65 @@ import (
log "github.com/sirupsen/logrus"
)
type ProxyWriter interface {
http.ResponseWriter
GetLength() int64
StatusCode() int
GetWriter() http.ResponseWriter
}
// ProxyWriterWithoutCloseNotify helps to capture response headers and status code
// from the ServeHTTP. It can be safely passed to ServeHTTP handler,
// wrapping the real response writer.
type ProxyWriterWithoutCloseNotify struct {
type ProxyWriter struct {
W http.ResponseWriter
Code int
Length int64
code int
length int64
}
func NewProxyWriterWithoutCloseNotify(writer http.ResponseWriter) *ProxyWriterWithoutCloseNotify {
return &ProxyWriterWithoutCloseNotify{
func NewProxyWriter(writer http.ResponseWriter) *ProxyWriter {
return &ProxyWriter{
W: writer,
}
}
func NewSimpleProxyWriter(writer http.ResponseWriter) *SimpleProxyWriter {
return &SimpleProxyWriter{
ProxyWriterWithoutCloseNotify: NewProxyWriterWithoutCloseNotify(writer),
}
}
type SimpleProxyWriter struct {
*ProxyWriterWithoutCloseNotify
}
func (p *ProxyWriterWithoutCloseNotify) GetWriter() http.ResponseWriter {
return p.W
}
func (p *ProxyWriterWithoutCloseNotify) StatusCode() int {
if p.Code == 0 {
func (p *ProxyWriter) StatusCode() int {
if p.code == 0 {
// per contract standard lib will set this to http.StatusOK if not set
// by user, here we avoid the confusion by mirroring this logic
return http.StatusOK
}
return p.Code
return p.code
}
func (p *ProxyWriterWithoutCloseNotify) Header() http.Header {
func (p *ProxyWriter) GetLength() int64 {
return p.length
}
func (p *ProxyWriter) Header() http.Header {
return p.W.Header()
}
func (p *ProxyWriterWithoutCloseNotify) Write(buf []byte) (int, error) {
p.Length = p.Length + int64(len(buf))
func (p *ProxyWriter) Write(buf []byte) (int, error) {
p.length = p.length + int64(len(buf))
return p.W.Write(buf)
}
func (p *ProxyWriterWithoutCloseNotify) WriteHeader(code int) {
p.Code = code
func (p *ProxyWriter) WriteHeader(code int) {
p.code = code
p.W.WriteHeader(code)
}
func (p *ProxyWriterWithoutCloseNotify) Flush() {
func (p *ProxyWriter) Flush() {
if f, ok := p.W.(http.Flusher); ok {
f.Flush()
}
}
func (p *ProxyWriterWithoutCloseNotify) GetLength() int64 {
return p.Length
}
func (p *SimpleProxyWriter) CloseNotify() <-chan bool {
if cn, ok := p.GetWriter().(http.CloseNotifier); ok {
func (p *ProxyWriter) CloseNotify() <-chan bool {
if cn, ok := p.W.(http.CloseNotifier); ok {
return cn.CloseNotify()
}
log.Warningf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. Returning dummy channel.", reflect.TypeOf(p.GetWriter()))
log.Debugf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. Returning dummy channel.", reflect.TypeOf(p.W))
return make(<-chan bool)
}
func (p *ProxyWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
func (p *ProxyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if hi, ok := p.W.(http.Hijacker); ok {
return hi.Hijack()
}
log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W))
return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W))
log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W))
return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W))
}
func NewBufferWriter(w io.WriteCloser) *BufferWriter {
@@ -139,8 +115,8 @@ func (b *BufferWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if hi, ok := b.W.(http.Hijacker); ok {
return hi.Hijack()
}
log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W))
return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W))
log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W))
return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W))
}
type nopWriteCloser struct {