forked from SW/traefik
Compare commits
17 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1db9482a8e | |
| | 888e6dcbc8 | |
| | a09a8b1235 | |
| | 36ee69609e | |
| | 98b52d1f54 | |
| | 4892b2b0da | |
| | 91ce78da46 | |
| | f06e256934 | |
| | 4699d6be18 | |
| | 6473002021 | |
| | 4d89ff7e18 | |
| | c5c63071ca | |
| | 9fbe21c534 | |
| | 7a34303593 | |
| | fdb24c64e4 | |
| | 631079a12f | |
| | f99f3b987e | |
@@ -2,7 +2,7 @@
set -e

sudo -E apt-get -yq update
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-engine=${DOCKER_VERSION}*
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*
docker version

pip install --user -r requirements.txt

@@ -5,8 +5,6 @@ export secure='btt4r13t09gQlHb6gYrvGC2yGCMMHfnp1Mz1RQedc4Mpf/FfT8aE6xmK2a2i9CCvs

export REPO='containous/traefik'

export DOCKER_VERSION=1.12.6

if VERSION=$(git describe --exact-match --abbrev=0 --tags);
then
export VERSION

@@ -11,7 +11,6 @@ env:
- VERSION: $TRAVIS_TAG
- CODENAME: raclette
- N_MAKE_JOBS: 2
- DOCKER_VERSION: 1.12.6

script:
- echo "Skipping tests... (Tests are executed on SemaphoreCI)"
@@ -21,7 +20,7 @@ before_deploy:
if ! [ "$BEFORE_DEPLOY_RUN" ]; then
export BEFORE_DEPLOY_RUN=1;
sudo -E apt-get -yq update;
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-engine=${DOCKER_VERSION}*;
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*;
docker version;
pip install --user -r requirements.txt;
make -j${N_MAKE_JOBS} crossbinary-parallel;

25 CHANGELOG.md
@@ -1,5 +1,30 @@
# Change Log

## [v1.3.4](https://github.com/containous/traefik/tree/v1.3.4) (2017-07-27)
[All Commits](https://github.com/containous/traefik/compare/v1.3.3...v1.3.4)

**Bug fixes:**
- **[middleware]** Double compression. ([#1863](https://github.com/containous/traefik/pull/1863) by [ldez](https://github.com/ldez))
- **[middleware]** Fix replace path rule ([#1859](https://github.com/containous/traefik/pull/1859) by [dedalusj](https://github.com/dedalusj))
- **[websocket]** New oxy with gorilla for websocket with integration tests ([#1896](https://github.com/containous/traefik/pull/1896) by [Juliens](https://github.com/Juliens))

## [v1.3.3](https://github.com/containous/traefik/tree/v1.3.3) (2017-07-06)
[All Commits](https://github.com/containous/traefik/compare/v1.3.2...v1.3.3)

**Bug fixes:**
- **[k8s]** Undo the Secrets controller sync wait. ([#1828](https://github.com/containous/traefik/pull/1828) by [timoreimann](https://github.com/timoreimann))
- **[k8s]** Tell glog to log everything into STDERR. ([#1817](https://github.com/containous/traefik/pull/1817) by [timoreimann](https://github.com/timoreimann))

## [v1.3.2](https://github.com/containous/traefik/tree/v1.3.2) (2017-06-29)
[All Commits](https://github.com/containous/traefik/compare/v1.3.1...v1.3.2)

**Bug fixes:**
- **[acme]** Add provided certificate checking before LE certificate generation with OnHostRule option ([#1772](https://github.com/containous/traefik/pull/1772) by [nmengin](https://github.com/nmengin))
- **[k8s]** Fix race on closing event channel. ([#1798](https://github.com/containous/traefik/pull/1798) by [timoreimann](https://github.com/timoreimann))
- **[marathon]** Upgrade go-marathon to dd6cbd4. ([#1800](https://github.com/containous/traefik/pull/1800) by [timoreimann](https://github.com/timoreimann))
- **[oxy,websocket]** Problem with keepalive when switching protocol failed ([#1782](https://github.com/containous/traefik/pull/1782) by [ldez](https://github.com/ldez))
- **[oxy]** Fix proxying of unannounced trailers ([#1805](https://github.com/containous/traefik/pull/1805) by [ldez](https://github.com/ldez))

## [v1.3.1](https://github.com/containous/traefik/tree/v1.3.1) (2017-06-16)
[All Commits](https://github.com/containous/traefik/compare/v1.3.0...v1.3.1)

54 acme/acme.go
@@ -328,14 +328,11 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, checkOnDemandDomain func
func (a *ACME) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
domain := types.CanonicalDomain(clientHello.ServerName)
account := a.store.Get().(*Account)
//use regex to test for wildcard certs that might have been added into TLSConfig
for k := range a.TLSConfig.NameToCertificate {
selector := "^" + strings.Replace(k, "*.", "[^\\.]*\\.?", -1) + "$"
match, _ := regexp.MatchString(selector, domain)
if match {
return a.TLSConfig.NameToCertificate[k], nil
}

if providedCertificate := a.getProvidedCertificate([]string{domain}); providedCertificate != nil {
return providedCertificate, nil
}

if challengeCert, ok := a.challengeProvider.getCertificate(domain); ok {
log.Debugf("ACME got challenge %s", domain)
return challengeCert, nil
@@ -520,8 +517,20 @@ func (a *ACME) loadCertificateOnDemand(clientHello *tls.ClientHelloInfo) (*tls.C
// LoadCertificateForDomains loads certificates from ACME for given domains
func (a *ACME) LoadCertificateForDomains(domains []string) {
a.jobs.In() <- func() {
log.Debugf("LoadCertificateForDomains %s...", domains)
log.Debugf("LoadCertificateForDomains %v...", domains)

if len(domains) == 0 {
// no domain
return
}

domains = fun.Map(types.CanonicalDomain, domains).([]string)

// Check provided certificates
if a.getProvidedCertificate(domains) != nil {
return
}

operation := func() error {
if a.client == nil {
return fmt.Errorf("ACME client still not built")
@@ -540,11 +549,7 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {
}
account := a.store.Get().(*Account)
var domain Domain
if len(domains) == 0 {
// no domain
return

} else if len(domains) > 1 {
if len(domains) > 1 {
domain = Domain{Main: domains[0], SANs: domains[1:]}
} else {
domain = Domain{Main: domains[0]}
@@ -578,6 +583,29 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {
}
}

// Get provided certificate which check a domains list (Main and SANs)
func (a *ACME) getProvidedCertificate(domains []string) *tls.Certificate {
// Use regex to test for provided certs that might have been added into TLSConfig
providedCertMatch := false
log.Debugf("Look for provided certificate to validate %s...", domains)
for k := range a.TLSConfig.NameToCertificate {
selector := "^" + strings.Replace(k, "*.", "[^\\.]*\\.?", -1) + "$"
for _, domainToCheck := range domains {
providedCertMatch, _ = regexp.MatchString(selector, domainToCheck)
if !providedCertMatch {
break
}
}
if providedCertMatch {
log.Debugf("Got provided certificate for domains %s", domains)
return a.TLSConfig.NameToCertificate[k]

}
}
log.Debugf("No provided certificate found for domains %s, get ACME certificate.", domains)
return nil
}

func (a *ACME) getDomainsCertificates(domains []string) (*Certificate, error) {
domains = fun.Map(types.CanonicalDomain, domains).([]string)
log.Debugf("Loading ACME certificates %s...", domains)

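For illustration only (not part of the commits above): a minimal Go sketch of how the wildcard selector built in `getCertificate` and `getProvidedCertificate` behaves. The certificate name and domains below are made up for the example.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// A stored certificate name such as "*.containo.us" (illustrative) is
	// rewritten into a regular expression, exactly like the selector above.
	certName := "*.containo.us"
	selector := "^" + strings.Replace(certName, "*.", "[^\\.]*\\.?", -1) + "$"

	for _, domain := range []string{"traefik.containo.us", "traefik.acme.io"} {
		match, _ := regexp.MatchString(selector, domain)
		fmt.Printf("%-20s matched by %s: %v\n", domain, certName, match)
	}
}
```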
@@ -1,6 +1,7 @@
package acme

import (
"crypto/tls"
"encoding/base64"
"net/http"
"net/http/httptest"
@@ -9,6 +10,7 @@ import (
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/xenolf/lego/acme"
)

@@ -277,3 +279,18 @@ cijFkALeQp/qyeXdFld2v9gUN3eCgljgcl0QweRoIc=---`)
t.Errorf("No change to acme.PreCheckDNS when meant to be adding enforcing override function.")
}
}

func TestAcme_getProvidedCertificate(t *testing.T) {
mm := make(map[string]*tls.Certificate)
mm["*.containo.us"] = &tls.Certificate{}
mm["traefik.acme.io"] = &tls.Certificate{}

a := ACME{TLSConfig: &tls.Config{NameToCertificate: mm}}

domains := []string{"traefik.containo.us", "trae.containo.us"}
certificate := a.getProvidedCertificate(domains)
assert.NotNil(t, certificate)
domains = []string{"traefik.acme.io", "trae.acme.io"}
certificate = a.getProvidedCertificate(domains)
assert.Nil(t, certificate)
}

@@ -15,7 +15,7 @@ RUN go get github.com/jteeuwen/go-bindata/... \
&& go get github.com/sgotti/glide-vc

# Which docker version to test on
ARG DOCKER_VERSION=1.10.3
ARG DOCKER_VERSION=17.03.1

# Which glide version to test on
@@ -28,7 +28,7 @@ RUN mkdir -p /usr/local/bin \

# Download docker
RUN mkdir -p /usr/local/bin \
&& curl -fL https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz \
&& curl -fL https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}-ce.tgz \
| tar -xzC /usr/local/bin --transform 's#^.+/##x'

WORKDIR /go/src/github.com/containous/traefik

14 glide.lock (generated)
@@ -1,5 +1,5 @@
|
||||
hash: e59e8244152a823cd3633fb09cdd583c4e5be78d7b50fb7047ba6b6a9ed5e8ec
|
||||
updated: 2017-05-19T23:30:19.890844996+02:00
|
||||
hash: bfc5801ed56be5f703a0924d8832dcccc42bf02f9e2b035ef77eab62c0cb4884
|
||||
updated: 2017-06-29T16:47:14.848940186+02:00
|
||||
imports:
|
||||
- name: cloud.google.com/go
|
||||
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
|
||||
@@ -178,7 +178,7 @@ imports:
|
||||
- store/etcd
|
||||
- store/zookeeper
|
||||
- name: github.com/donovanhide/eventsource
|
||||
version: fd1de70867126402be23c306e1ce32828455d85b
|
||||
version: 441a03aa37b3329bbb79f43de81914ea18724718
|
||||
- name: github.com/eapache/channels
|
||||
version: 47238d5aae8c0fefd518ef2bee46290909cf8263
|
||||
- name: github.com/eapache/queue
|
||||
@@ -201,7 +201,7 @@ imports:
|
||||
- name: github.com/fatih/color
|
||||
version: 9131ab34cf20d2f6d83fdc67168a5430d1c7dc23
|
||||
- name: github.com/gambol99/go-marathon
|
||||
version: 15ea23e360abb8b25071e677aed344f31838e403
|
||||
version: dd6cbd4c2d71294a19fb89158f2a00d427f174ab
|
||||
- name: github.com/ghodss/yaml
|
||||
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
- name: github.com/go-ini/ini
|
||||
@@ -320,7 +320,9 @@ imports:
|
||||
- name: github.com/mvdan/xurls
|
||||
version: db96455566f05ffe42bd6ac671f05eeb1152b45d
|
||||
- name: github.com/NYTimes/gziphandler
|
||||
version: 22d4470af89e09998fc16b35029df973932df4ae
|
||||
version: 316adfc72ed3b0157975917adf62ba2dc31842ce
|
||||
repo: https://github.com/containous/gziphandler.git
|
||||
vcs: git
|
||||
- name: github.com/ogier/pflag
|
||||
version: 45c278ab3607870051a2ea9040bb85fcb8557481
|
||||
- name: github.com/opencontainers/runc
|
||||
@@ -409,7 +411,7 @@ imports:
|
||||
- name: github.com/vdemeester/docker-events
|
||||
version: be74d4929ec1ad118df54349fda4b0cba60f849b
|
||||
- name: github.com/vulcand/oxy
|
||||
version: f88530866c561d24a6b5aac49f76d6351b788b9f
|
||||
version: 49f1894c20d972f5c73ff44b859f87deb83f0076
|
||||
repo: https://github.com/containous/oxy.git
|
||||
vcs: git
|
||||
subpackages:
|
||||
|
||||
@@ -8,7 +8,7 @@ import:
|
||||
- package: github.com/cenk/backoff
|
||||
- package: github.com/containous/flaeg
|
||||
- package: github.com/vulcand/oxy
|
||||
version: f88530866c561d24a6b5aac49f76d6351b788b9f
|
||||
version: 49f1894c20d972f5c73ff44b859f87deb83f0076
|
||||
repo: https://github.com/containous/oxy.git
|
||||
vcs: git
|
||||
subpackages:
|
||||
@@ -87,13 +87,15 @@ import:
|
||||
vcs: git
|
||||
- package: github.com/abbot/go-http-auth
|
||||
- package: github.com/NYTimes/gziphandler
|
||||
repo: https://github.com/containous/gziphandler.git
|
||||
vcs: git
|
||||
- package: github.com/docker/leadership
|
||||
- package: github.com/satori/go.uuid
|
||||
version: ^1.1.0
|
||||
- package: k8s.io/client-go
|
||||
version: v2.0.0
|
||||
- package: github.com/gambol99/go-marathon
|
||||
version: d672c6fbb499596869d95146a26e7d0746c06c54
|
||||
version: dd6cbd4c2d71294a19fb89158f2a00d427f174ab
|
||||
- package: github.com/ArthurHlt/go-eureka-client
|
||||
subpackages:
|
||||
- eureka
|
||||
|
||||
@@ -2,32 +2,45 @@ package main
import (
"crypto/tls"
"errors"
"net/http"
"os"
"os/exec"
"time"

"github.com/go-check/check"

"errors"
"github.com/containous/traefik/integration/utils"
"github.com/go-check/check"
checker "github.com/vdemeester/shakers"
)

// ACME test suites (using libcompose)
type AcmeSuite struct {
BaseSuite
boulderIP string
}

// Acme tests configuration
type AcmeTestCase struct {
onDemand bool
traefikConfFilePath string
domainToCheck string
}

// Domain to check
const acmeDomain = "traefik.acme.wtf"

// Wildcard domain to check
const wildcardDomain = "*.acme.wtf"

func (s *AcmeSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "boulder")
s.composeProject.Start(c)

boulderHost := s.composeProject.Container(c, "boulder").NetworkSettings.IPAddress
s.boulderIP = s.composeProject.Container(c, "boulder").NetworkSettings.IPAddress

// wait for boulder
err := utils.Try(120*time.Second, func() error {
resp, err := http.Get("http://" + boulderHost + ":4000/directory")
resp, err := http.Get("http://" + s.boulderIP + ":4000/directory")
if err != nil {
return err
}
@@ -47,9 +60,48 @@ func (s *AcmeSuite) TearDownSuite(c *check.C) {
}
}

func (s *AcmeSuite) TestRetrieveAcmeCertificate(c *check.C) {
boulderHost := s.composeProject.Container(c, "boulder").NetworkSettings.IPAddress
file := s.adaptFile(c, "fixtures/acme/acme.toml", struct{ BoulderHost string }{boulderHost})
// Test OnDemand option with none provided certificate
func (s *AcmeSuite) TestOnDemandRetrieveAcmeCertificate(c *check.C) {
aTestCase := AcmeTestCase{
traefikConfFilePath: "fixtures/acme/acme.toml",
onDemand: true,
domainToCheck: acmeDomain}
s.retrieveAcmeCertificate(c, aTestCase)
}

// Test OnHostRule option with none provided certificate
func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificate(c *check.C) {
aTestCase := AcmeTestCase{
traefikConfFilePath: "fixtures/acme/acme.toml",
onDemand: false,
domainToCheck: acmeDomain}
s.retrieveAcmeCertificate(c, aTestCase)
}

// Test OnDemand option with a wildcard provided certificate
func (s *AcmeSuite) TestOnDemandRetrieveAcmeCertificateWithWildcard(c *check.C) {
aTestCase := AcmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_provided.toml",
onDemand: true,
domainToCheck: wildcardDomain}
s.retrieveAcmeCertificate(c, aTestCase)
}

// Test onHostRule option with a wildcard provided certificate
func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificateWithWildcard(c *check.C) {
aTestCase := AcmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_provided.toml",
onDemand: false,
domainToCheck: wildcardDomain}
s.retrieveAcmeCertificate(c, aTestCase)
}

// Doing an HTTPS request and test the response certificate
func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, a AcmeTestCase) {
file := s.adaptFile(c, a.traefikConfFilePath, struct {
BoulderHost string
OnDemand, OnHostRule bool
}{s.boulderIP, a.onDemand, !a.onDemand})
defer os.Remove(file)
cmd := exec.Command(traefikBinary, "--configFile="+file)
err := cmd.Start()
@@ -77,16 +129,32 @@ func (s *AcmeSuite) TestRetrieveAcmeCertificate(c *check.C) {
tr = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
ServerName: "traefik.acme.wtf",
ServerName: acmeDomain,
},
}
client = &http.Client{Transport: tr}
req, _ := http.NewRequest("GET", "https://127.0.0.1:5001/", nil)
req.Host = "traefik.acme.wtf"
req.Header.Set("Host", "traefik.acme.wtf")
req.Host = acmeDomain
req.Header.Set("Host", acmeDomain)
req.Header.Set("Accept", "*/*")
resp, err := client.Do(req)

var resp *http.Response
// Retry to send a Request which uses the LE generated certificate
err = utils.Try(60*time.Second, func() error {
resp, err = client.Do(req)
// /!\ If connection is not closed, SSLHandshake will only be done during the first trial /!\
req.Close = true
if err != nil {
return err
} else if resp.TLS.PeerCertificates[0].Subject.CommonName != a.domainToCheck {
return errors.New("Domain " + resp.TLS.PeerCertificates[0].Subject.CommonName + " found in place of " + a.domainToCheck)
}
return nil
})
c.Assert(err, checker.IsNil)
// Check Domain into response certificate
c.Assert(resp.TLS.PeerCertificates[0].Subject.CommonName, checker.Equals, a.domainToCheck)
// Expected a 200
c.Assert(resp.StatusCode, checker.Equals, 200)

}

37 integration/fixtures/acme/README.md (Normal file)
@@ -0,0 +1,37 @@
# How to generate the self-signed wildcard certificate

```bash
#!/usr/bin/env bash

# Specify where we will install
# the wildcard certificate
SSL_DIR="./ssl"

# Set the wildcarded domain
# we want to use
DOMAIN="*.acme.wtf"

# A blank passphrase
PASSPHRASE=""

# Set our CSR variables
SUBJ="
C=FR
ST=MP
O=
localityName=Toulouse
commonName=$DOMAIN
organizationalUnitName=Traefik
emailAddress=
"

# Create our SSL directory
# in case it doesn't exist
sudo mkdir -p "$SSL_DIR"

# Generate our Private Key, CSR and Certificate
sudo openssl genrsa -out "$SSL_DIR/wildcard.key" 2048
sudo openssl req -new -subj "$(echo -n "$SUBJ" | tr "\n" "/")" -key "$SSL_DIR/wildcard.key" -out "$SSL_DIR/wildcard.csr" -passin pass:$PASSPHRASE
sudo openssl x509 -req -days 3650 -in "$SSL_DIR/wildcard.csr" -signkey "$SSL_DIR/wildcard.key" -out "$SSL_DIR/wildcard.crt"
sudo rm -f "$SSL_DIR/wildcard.csr"
```
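As a quick sanity check (not part of the original README; a sketch that assumes the `SSL_DIR` path produced by the script above), the generated certificate can be parsed and its subject printed, which should report `*.acme.wtf`:

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Path assumed from the SSL_DIR used in the script above.
	raw, err := ioutil.ReadFile("./ssl/wildcard.crt")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found in wildcard.crt")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CommonName:", cert.Subject.CommonName, "- NotAfter:", cert.NotAfter)
}
```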
@@ -14,7 +14,8 @@ defaultEntryPoints = ["http", "https"]
email = "test@traefik.io"
storage = "/dev/null"
entryPoint = "https"
onDemand = true
onDemand = {{.OnDemand}}
OnHostRule = {{.OnHostRule}}
caServer = "http://{{.BoulderHost}}:4000/directory"

[file]

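The `{{.OnDemand}}`-style placeholders above are Go template fields that the integration suite fills in before starting Traefik. A minimal, illustrative sketch of that rendering step (the struct and template text here are made up for the example, not Traefik's actual helper):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Render a TOML snippet containing the same placeholders as the fixture.
	tmpl := template.Must(template.New("acme").Parse(
		"onDemand = {{.OnDemand}}\nOnHostRule = {{.OnHostRule}}\ncaServer = \"http://{{.BoulderHost}}:4000/directory\"\n"))

	data := struct {
		BoulderHost          string
		OnDemand, OnHostRule bool
	}{BoulderHost: "127.0.0.1", OnDemand: true, OnHostRule: false}

	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```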
35 integration/fixtures/acme/acme_provided.toml (Normal file)
@@ -0,0 +1,35 @@
|
||||
logLevel = "DEBUG"
|
||||
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":8080"
|
||||
[entryPoints.https]
|
||||
address = ":5001"
|
||||
[entryPoints.https.tls]
|
||||
[[entryPoints.https.tls.certificates]]
|
||||
CertFile = "fixtures/acme/ssl/wildcard.crt"
|
||||
KeyFile = "fixtures/acme/ssl/wildcard.key"
|
||||
|
||||
[acme]
|
||||
email = "test@traefik.io"
|
||||
storage = "/dev/null"
|
||||
entryPoint = "https"
|
||||
onDemand = {{.OnDemand}}
|
||||
OnHostRule = {{.OnHostRule}}
|
||||
caServer = "http://{{.BoulderHost}}:4000/directory"
|
||||
|
||||
[file]
|
||||
|
||||
[backends]
|
||||
[backends.backend]
|
||||
[backends.backend.servers.server1]
|
||||
url = "http://127.0.0.1:9010"
|
||||
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend]
|
||||
backend = "backend"
|
||||
[frontends.frontend.routes.test]
|
||||
rule = "Host:traefik.acme.wtf"
|
||||
19 integration/fixtures/acme/ssl/wildcard.crt (Normal file)
@@ -0,0 +1,19 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDJDCCAgwCCQCS90TE7NuTqzANBgkqhkiG9w0BAQsFADBUMQswCQYDVQQGEwJG
|
||||
UjELMAkGA1UECAwCTVAxETAPBgNVBAcMCFRvdWxvdXNlMRMwEQYDVQQDDAoqLmFj
|
||||
bWUud3RmMRAwDgYDVQQLDAdUcmFlZmlrMB4XDTE3MDYyMzE0NTE0MVoXDTI3MDYy
|
||||
MTE0NTE0MVowVDELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAk1QMREwDwYDVQQHDAhU
|
||||
b3Vsb3VzZTETMBEGA1UEAwwKKi5hY21lLnd0ZjEQMA4GA1UECwwHVHJhZWZpazCC
|
||||
ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAODqsVCLhauFZPhPXqZDIKST
|
||||
wqoJST+jO5O/WmA7oC4S6JlecRoNsHAXyddd3cQW3yZqB0ryOHrMOpMX0PPXf3jS
|
||||
OOXoXA6xsq+RXlR4hDrBkOrj/LR/g62Eiuj2JVO2uy6tKJIetSB/Wzl6OgRkY/um
|
||||
EXIc7zQS81/QKg+pg7Z4AYJht5J88nOFHJ3RspUMaH1vJ6LhH3MOUkgFj+I1OiqX
|
||||
Tnkd7EDWbkYxAJa0xI2qbmY5VYv8dsIUN+IlPFDtBt87Fc2qv5dQkOz11FDYxWnz
|
||||
+kxX6+MESLBaTvJjXvG+bzTfh9xCExFQFiN+Us0JuLX8HKQ4MqWL2IiVLsko2osC
|
||||
AwEAATANBgkqhkiG9w0BAQsFAAOCAQEAl2jTX2yzUpiufrJ6WtZjKIAH8GF817hS
|
||||
dWvt2eyLrBPvllMUj8zqCE5uNVUDVuXQvOhOyx+3zZzfcgfYqbTD8G8amNWcSiRA
|
||||
vonoOn1p1pW2OonSi32h3qv5i4gCyh/6cBneYi03lkQ7uLCsJK9+dXTAvoKL6s23
|
||||
IXhZGS0Qkvs4vkORA2MX9tyJdyfCCaCx3GpPCGkKrKJ8ePTEvq1ZE2xdhERnV5pz
|
||||
L1PRY2QthXXVjMz7AXw0gkHvAbtrKVKR1Tv4ZK34bFBh/kyGAjkcn0zdeFKITqTF
|
||||
tCoXWEArmiRqGuXwbqU3mEA9Cv6aMM+0YX89K2InhOnBU80OWs0uMQ==
|
||||
-----END CERTIFICATE-----
|
||||
27 integration/fixtures/acme/ssl/wildcard.key (Normal file)
@@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEpAIBAAKCAQEA4OqxUIuFq4Vk+E9epkMgpJPCqglJP6M7k79aYDugLhLomV5x
|
||||
Gg2wcBfJ113dxBbfJmoHSvI4esw6kxfQ89d/eNI45ehcDrGyr5FeVHiEOsGQ6uP8
|
||||
tH+DrYSK6PYlU7a7Lq0okh61IH9bOXo6BGRj+6YRchzvNBLzX9AqD6mDtngBgmG3
|
||||
knzyc4UcndGylQxofW8nouEfcw5SSAWP4jU6KpdOeR3sQNZuRjEAlrTEjapuZjlV
|
||||
i/x2whQ34iU8UO0G3zsVzaq/l1CQ7PXUUNjFafP6TFfr4wRIsFpO8mNe8b5vNN+H
|
||||
3EITEVAWI35SzQm4tfwcpDgypYvYiJUuySjaiwIDAQABAoIBAQCs9Ex9v4x+pQlL
|
||||
2NzTxXLom6dp0dI92WwK5W696Zv3UhsDNRiMDFLNH73amxfZnizjAU2yWCkOZNX2
|
||||
Hq5TlDc11ZJjWRbRRdw+He8HzdUAybCCr+a3dgbv+6hGFGIHydCOyCEWm/50ivq/
|
||||
bDoI/pnT/ZQUyCM5TAlSeGSfvp7GRHi9v3HOl85H1Pn2Dvyk9gj4y3BIFrKuv8fJ
|
||||
o6aEzlfgWGROCzshU2m8fB9P0B4hWDlJsc1D01sW60zhjLo9+XoWznmw5mczz7sc
|
||||
S5sdDh47rSJsNRuFd7YDjeLzJWPqLrKVB5nn6nRbvrnBqhfsknkO4VIXhmEMSs1u
|
||||
RMYOJ9ShAoGBAPinA6ktIeez1t5IsfxGwbCeZzFI1suZqZeX6ezNKaMpeykyAPuh
|
||||
CqN7H+a4NCKsinsgHJowU98ckHeAsQ22s7R8dFZhyxEXkcBawY2soK29eq2aJHnY
|
||||
lqKOwjOA7wgElRHwLkNFniQ5lKFPMly8a9NVAqg+Th/J3uR+7wE2t+b1AoGBAOeQ
|
||||
H/vVkdaNB2ovnCxMh+OfxpcjkfF6KnD2jpn/TKsbR5BtnrtyRLc5+qt52D0CEgSy
|
||||
qU3zrsZebShej3OIBPrEwIcPN+LezaxnLMf9RXdOde+wWrQLWLkShJaSTwSoGqZB
|
||||
fcO0/sc1lzhGxm++ByP5mWbHr/VM9IdTQQH5Bct/AoGBAMhmOrIXeNL4Az2FU0Vi
|
||||
dWp2T+7NqKfRAXj264Z5V4xzuxpZfadPhHZ7nhth7Erhyn4vRD4UoxQXPmvB4XCP
|
||||
Bkh5YX3ZNUNiPorL2mDnd1xvcLcHm0xEfisnaWb/DCbnIomhjHeVXT4O1jYn0Qwi
|
||||
o7hgNFMKXAaMuUJo9xGAWzkdAoGASxC4nY2tOiz7k1udt+qTPqHj4cjhHbOpoHb8
|
||||
4UUWmH0+ZL50b3Vqey8raH0WMSjDqIw2QBPXu2yO3EBTJnOYkaZIdz/isQPjDplf
|
||||
tfEPnM5tgubbcHQhLdWn75u8S9km0nB2kYPR98gSnmarGzwx2mKmbOAc1Vs+BcRi
|
||||
VX5hd4cCgYAubBq0VsFT0KVU3Rva3dgPR1K5bp4r4hE5cGXm4HvLiOgv995CwPy1
|
||||
27eONF9GN7hvjI6C17jA1Gyx5sN0QrsMv/1BZqiGaragMOPXFD+tVecWuKH4lZQi
|
||||
VbKTOWHlGkrDCpiYWpfetQAjouj+0c6d+wigcoC8e5dwxBPI2f3rGw==
|
||||
-----END RSA PRIVATE KEY-----
|
||||
24 integration/fixtures/websocket/config.toml (Normal file)
@@ -0,0 +1,24 @@
|
||||
defaultEntryPoints = ["http"]
|
||||
|
||||
logLevel = "DEBUG"
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":8000"
|
||||
|
||||
|
||||
[web]
|
||||
address = ":8080"
|
||||
|
||||
[file]
|
||||
|
||||
[backends]
|
||||
[backends.backend1]
|
||||
[backends.backend1.servers.server1]
|
||||
url = "{{ .WebsocketServer }}"
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend1]
|
||||
backend = "backend1"
|
||||
[frontends.frontend1.routes.test_1]
|
||||
rule = "Path:/ws"
|
||||
@@ -14,6 +14,8 @@ import (
|
||||
"github.com/containous/traefik/integration/utils"
|
||||
"github.com/go-check/check"
|
||||
|
||||
"bytes"
|
||||
|
||||
compose "github.com/libkermit/compose/check"
|
||||
checker "github.com/vdemeester/shakers"
|
||||
)
|
||||
@@ -38,6 +40,7 @@ func init() {
|
||||
check.Suite(&EurekaSuite{})
|
||||
check.Suite(&AcmeSuite{})
|
||||
check.Suite(&DynamoDBSuite{})
|
||||
check.Suite(&WebsocketSuite{})
|
||||
}
|
||||
|
||||
var traefikBinary = "../dist/traefik"
|
||||
@@ -71,6 +74,18 @@ func (s *BaseSuite) createComposeProject(c *check.C, name string) {
|
||||
s.composeProject = compose.CreateProject(c, projectName, composeFile)
|
||||
}
|
||||
|
||||
func withConfigFile(file string) string {
|
||||
return "--configFile=" + file
|
||||
}
|
||||
|
||||
func (s *BaseSuite) cmdTraefik(args ...string) (*exec.Cmd, *bytes.Buffer) {
|
||||
cmd := exec.Command(traefikBinary, args...)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
cmd.Stderr = &out
|
||||
return cmd, &out
|
||||
}
|
||||
|
||||
func (s *BaseSuite) traefikCmd(c *check.C, args ...string) (*exec.Cmd, string) {
|
||||
cmd, out, err := utils.RunCommand(traefikBinary, args...)
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Fail to run %s with %v", traefikBinary, args))
|
||||
|
||||
81 integration/websocket_test.go (Normal file)
@@ -0,0 +1,81 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"time"
|
||||
|
||||
"github.com/go-check/check"
|
||||
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containous/traefik/integration/utils"
|
||||
"github.com/gorilla/websocket"
|
||||
checker "github.com/vdemeester/shakers"
|
||||
)
|
||||
|
||||
// WebsocketSuite
|
||||
type WebsocketSuite struct{ BaseSuite }
|
||||
|
||||
func (suite *WebsocketSuite) TestBase(c *check.C) {
|
||||
var upgrader = websocket.Upgrader{} // use default options
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
c, err := upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer c.Close()
|
||||
for {
|
||||
mt, message, err := c.ReadMessage()
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = c.WriteMessage(mt, message)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}))
|
||||
|
||||
file := suite.adaptFile(c, "fixtures/websocket/config.toml", struct {
|
||||
WebsocketServer string
|
||||
}{
|
||||
WebsocketServer: srv.URL,
|
||||
})
|
||||
|
||||
defer os.Remove(file)
|
||||
cmd, _ := suite.cmdTraefik(withConfigFile(file), "--debug")
|
||||
|
||||
err := cmd.Start()
|
||||
|
||||
c.Assert(err, check.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// wait for traefik
|
||||
err = utils.TryRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, func(res *http.Response) error {
|
||||
body, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(string(body), "127.0.0.1") {
|
||||
return errors.New("Incorrect traefik config")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
conn, _, err := websocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws", nil)
|
||||
|
||||
c.Assert(err, checker.IsNil)
|
||||
conn.WriteMessage(websocket.TextMessage, []byte("OK"))
|
||||
|
||||
_, msg, err := conn.ReadMessage()
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
c.Assert(string(msg), checker.Equals, "OK")
|
||||
|
||||
}
|
||||
@@ -1,13 +1,11 @@
package middlewares

import (
"compress/gzip"
"net/http"

"github.com/NYTimes/gziphandler"
)

const (
contentEncodingHeader = "Content-Encoding"
"github.com/containous/traefik/log"
)

// Compress is a middleware that allows redirection
@@ -15,17 +13,13 @@ type Compress struct{}

// ServerHTTP is a function used by Negroni
func (c *Compress) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
if isEncoded(r.Header) {
next.ServeHTTP(rw, r)
} else {
newGzipHandler := gziphandler.GzipHandler(next)
newGzipHandler.ServeHTTP(rw, r)
}
gzipHandler(next).ServeHTTP(rw, r)
}

func isEncoded(headers http.Header) bool {
header := headers.Get(contentEncodingHeader)
// According to https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding,
// content is not encoded if the header 'Content-Encoding' is empty or equals to 'identity'.
return header != "" && header != "identity"
func gzipHandler(h http.Handler) http.Handler {
wrapper, err := gziphandler.NewGzipHandler(gzip.DefaultCompression, gziphandler.DefaultMinSize, &gziphandler.GzipResponseWriterWrapper{})
if err != nil {
log.Error(err)
}
return wrapper(h)
}

@@ -1,36 +1,39 @@
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/NYTimes/gziphandler"
|
||||
"github.com/codegangsta/negroni"
|
||||
"github.com/containous/traefik/testhelpers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
varyHeader = "Vary"
|
||||
gzip = "gzip"
|
||||
acceptEncodingHeader = "Accept-Encoding"
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
varyHeader = "Vary"
|
||||
gzipValue = "gzip"
|
||||
)
|
||||
|
||||
func TestShouldCompressWhenNoContentEncodingHeader(t *testing.T) {
|
||||
handler := &Compress{}
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil)
|
||||
req.Header.Add(acceptEncodingHeader, gzip)
|
||||
req.Header.Add(acceptEncodingHeader, gzipValue)
|
||||
|
||||
baseBody := generateBytes(gziphandler.DefaultMinSize)
|
||||
next := func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Write(baseBody)
|
||||
}
|
||||
rw := httptest.NewRecorder()
|
||||
|
||||
rw := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rw, req, next)
|
||||
|
||||
assert.Equal(t, gzip, rw.Header().Get(contentEncodingHeader))
|
||||
assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader))
|
||||
assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader))
|
||||
|
||||
if assert.ObjectsAreEqualValues(rw.Body.Bytes(), baseBody) {
|
||||
@@ -42,28 +45,105 @@ func TestShouldNotCompressWhenContentEncodingHeader(t *testing.T) {
|
||||
handler := &Compress{}
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil)
|
||||
req.Header.Add(acceptEncodingHeader, gzip)
|
||||
req.Header.Add(contentEncodingHeader, gzip)
|
||||
|
||||
baseBody := generateBytes(gziphandler.DefaultMinSize)
|
||||
req.Header.Add(acceptEncodingHeader, gzipValue)
|
||||
|
||||
fakeCompressedBody := generateBytes(gziphandler.DefaultMinSize)
|
||||
next := func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Write(baseBody)
|
||||
rw.Header().Add(contentEncodingHeader, gzipValue)
|
||||
rw.Header().Add(varyHeader, acceptEncodingHeader)
|
||||
rw.Write(fakeCompressedBody)
|
||||
}
|
||||
|
||||
rw := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rw, req, next)
|
||||
|
||||
assert.Equal(t, "", rw.Header().Get(contentEncodingHeader))
|
||||
assert.Equal(t, "", rw.Header().Get(varyHeader))
|
||||
assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader))
|
||||
assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader))
|
||||
|
||||
assert.EqualValues(t, rw.Body.Bytes(), baseBody)
|
||||
assert.EqualValues(t, rw.Body.Bytes(), fakeCompressedBody)
|
||||
}
|
||||
|
||||
func TestShouldNotCompressWhenNoAcceptEncodingHeader(t *testing.T) {
|
||||
handler := &Compress{}
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil)
|
||||
|
||||
fakeBody := generateBytes(gziphandler.DefaultMinSize)
|
||||
next := func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Write(fakeBody)
|
||||
}
|
||||
|
||||
rw := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rw, req, next)
|
||||
|
||||
assert.Empty(t, rw.Header().Get(contentEncodingHeader))
|
||||
assert.EqualValues(t, rw.Body.Bytes(), fakeBody)
|
||||
}
|
||||
|
||||
func TestIntegrationShouldNotCompressWhenContentAlreadyCompressed(t *testing.T) {
|
||||
fakeCompressedBody := generateBytes(100000)
|
||||
|
||||
handler := func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Add(contentEncodingHeader, gzipValue)
|
||||
rw.Header().Add(varyHeader, acceptEncodingHeader)
|
||||
rw.Write(fakeCompressedBody)
|
||||
}
|
||||
|
||||
comp := &Compress{}
|
||||
|
||||
negro := negroni.New(comp)
|
||||
negro.UseHandlerFunc(handler)
|
||||
ts := httptest.NewServer(negro)
|
||||
defer ts.Close()
|
||||
|
||||
client := &http.Client{}
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
req.Header.Add(acceptEncodingHeader, gzipValue)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
assert.NoError(t, err, "there should be no error")
|
||||
|
||||
assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader))
|
||||
assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader))
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
assert.EqualValues(t, fakeCompressedBody, body)
|
||||
}
|
||||
|
||||
func TestIntegrationShouldCompressWhenAcceptEncodingHeaderIsPresent(t *testing.T) {
|
||||
fakeBody := generateBytes(100000)
|
||||
|
||||
handler := func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Write(fakeBody)
|
||||
}
|
||||
|
||||
comp := &Compress{}
|
||||
|
||||
negro := negroni.New(comp)
|
||||
negro.UseHandlerFunc(handler)
|
||||
ts := httptest.NewServer(negro)
|
||||
defer ts.Close()
|
||||
|
||||
client := &http.Client{}
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil)
|
||||
req.Header.Add(acceptEncodingHeader, gzipValue)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
assert.NoError(t, err, "there should be no error")
|
||||
|
||||
assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader))
|
||||
assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader))
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if assert.ObjectsAreEqualValues(body, fakeBody) {
|
||||
assert.Fail(t, "expected a compressed body", "got %v", body)
|
||||
}
|
||||
}
|
||||
|
||||
func generateBytes(len int) []byte {
|
||||
var value []byte
|
||||
for i := 0; i < len; i++ {
|
||||
value = append(value, 0x61)
|
||||
value = append(value, 0x61+byte(i))
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
@@ -16,5 +16,6 @@ const ReplacedPathHeader = "X-Replaced-Path"
|
||||
func (s *ReplacePath) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
r.Header.Add(ReplacedPathHeader, r.URL.Path)
|
||||
r.URL.Path = s.Path
|
||||
r.RequestURI = r.URL.RequestURI()
|
||||
s.Handler.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
package middlewares_test
|
||||
package middlewares
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/containous/traefik/middlewares"
|
||||
"github.com/containous/traefik/testhelpers"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestReplacePath(t *testing.T) {
|
||||
@@ -17,28 +18,24 @@ func TestReplacePath(t *testing.T) {
|
||||
|
||||
for _, path := range paths {
|
||||
t.Run(path, func(t *testing.T) {
|
||||
var newPath, oldPath string
|
||||
handler := &middlewares.ReplacePath{
|
||||
|
||||
var expectedPath, actualHeader, requestURI string
|
||||
handler := &ReplacePath{
|
||||
Path: replacementPath,
|
||||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
newPath = r.URL.Path
|
||||
oldPath = r.Header.Get("X-Replaced-Path")
|
||||
expectedPath = r.URL.Path
|
||||
actualHeader = r.Header.Get(ReplacedPathHeader)
|
||||
requestURI = r.RequestURI
|
||||
}),
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", "http://localhost"+path, nil)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+path, nil)
|
||||
|
||||
handler.ServeHTTP(nil, req)
|
||||
if newPath != replacementPath {
|
||||
t.Fatalf("new path should be '%s'", replacementPath)
|
||||
}
|
||||
|
||||
if oldPath != path {
|
||||
t.Fatalf("old path should be '%s'", path)
|
||||
}
|
||||
assert.Equal(t, expectedPath, replacementPath, "Unexpected path.")
|
||||
assert.Equal(t, path, actualHeader, "Unexpected '%s' header.", ReplacedPathHeader)
|
||||
assert.Equal(t, expectedPath, requestURI, "Unexpected request URI.")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
@@ -62,6 +63,12 @@ func (p *Provider) newK8sClient() (Client, error) {
|
||||
// Provide allows the k8s provider to provide configurations to traefik
|
||||
// using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool, constraints types.Constraints) error {
|
||||
// Tell glog (used by client-go) to log into STDERR. Otherwise, we risk
|
||||
// certain kinds of API errors getting logged into a directory not
|
||||
// available in a `FROM scratch` Docker container, causing glog to abort
|
||||
// hard with an exit code > 0.
|
||||
flag.Set("logtostderr", "true")
|
||||
|
||||
k8sClient, err := p.newK8sClient()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -110,11 +117,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
log.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
log.Errorf("Provider connection error: %s; retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify)
|
||||
if err != nil {
|
||||
log.Errorf("Cannot connect to Provider server %+v", err)
|
||||
log.Errorf("Cannot connect to Provider: %s", err)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -171,7 +178,8 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error)
|
||||
if _, exists := templateObjects.Frontends[r.Host+pa.Path]; !exists {
|
||||
basicAuthCreds, err := handleBasicAuthConfig(i, k8sClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.Errorf("Failed to retrieve basic auth configuration for ingress %s/%s: %s", i.ObjectMeta.Namespace, i.ObjectMeta.Name, err)
|
||||
continue
|
||||
}
|
||||
templateObjects.Frontends[r.Host+pa.Path] = &types.Frontend{
|
||||
Backend: r.Host + pa.Path,
|
||||
@@ -290,18 +298,15 @@ func handleBasicAuthConfig(i *v1beta1.Ingress, k8sClient Client) ([]string, erro
|
||||
return nil, nil
|
||||
}
|
||||
if strings.ToLower(authType) != "basic" {
|
||||
return nil, fmt.Errorf("unsupported auth-type: %q", authType)
|
||||
return nil, fmt.Errorf("unsupported auth-type on annotation ingress.kubernetes.io/auth-type: %q", authType)
|
||||
}
|
||||
authSecret := i.Annotations["ingress.kubernetes.io/auth-secret"]
|
||||
if authSecret == "" {
|
||||
return nil, errors.New("auth-secret annotation must be set")
|
||||
return nil, errors.New("auth-secret annotation ingress.kubernetes.io/auth-secret must be set")
|
||||
}
|
||||
basicAuthCreds, err := loadAuthCredentials(i.Namespace, authSecret, k8sClient)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(basicAuthCreds) == 0 {
|
||||
return nil, errors.New("secret file without credentials")
|
||||
return nil, fmt.Errorf("failed to load auth credentials: %s", err)
|
||||
}
|
||||
return basicAuthCreds, nil
|
||||
}
|
||||
@@ -314,9 +319,9 @@ func loadAuthCredentials(namespace, secretName string, k8sClient Client) ([]stri
|
||||
case !ok:
|
||||
return nil, fmt.Errorf("secret %q/%q not found", namespace, secretName)
|
||||
case secret == nil:
|
||||
return nil, errors.New("secret data must not be nil")
|
||||
return nil, fmt.Errorf("data for secret %q/%q must not be nil", namespace, secretName)
|
||||
case len(secret.Data) != 1:
|
||||
return nil, errors.New("secret must contain single element only")
|
||||
return nil, fmt.Errorf("found %d elements for secret %q/%q, must be single element exactly", len(secret.Data), namespace, secretName)
|
||||
default:
|
||||
}
|
||||
var firstSecret []byte
|
||||
@@ -331,6 +336,10 @@ func loadAuthCredentials(namespace, secretName string, k8sClient Client) ([]stri
|
||||
creds = append(creds, cred)
|
||||
}
|
||||
}
|
||||
if len(creds) == 0 {
|
||||
return nil, fmt.Errorf("secret %q/%q does not contain any credentials", namespace, secretName)
|
||||
}
|
||||
|
||||
return creds, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ fi

# create docker image containous/traefik
echo "Updating docker containous/traefik image..."
docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker tag containous/traefik containous/traefik:${TRAVIS_COMMIT}
docker push containous/traefik:${TRAVIS_COMMIT}
docker tag containous/traefik containous/traefik:experimental

@@ -30,7 +30,7 @@ git push -q --follow-tags -u origin master > /dev/null 2>&1

# create docker image emilevauge/traefik (compatibility)
echo "Updating docker emilevauge/traefik image..."
docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker tag containous/traefik emilevauge/traefik:latest
docker push emilevauge/traefik:latest
docker tag emilevauge/traefik:latest emilevauge/traefik:${VERSION}

59 vendor/github.com/NYTimes/gziphandler/gzip.go (generated, vendored)
@@ -3,6 +3,7 @@ package gziphandler
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -97,6 +98,7 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
|
||||
}
|
||||
|
||||
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
|
||||
// On the first write, w.buf changes from nil to a valid slice
|
||||
w.buf = append(w.buf, b...)
|
||||
|
||||
// If the global writes are bigger than the minSize, compression is enable.
|
||||
@@ -122,7 +124,9 @@ func (w *GzipResponseWriter) startGzip() error {
|
||||
w.Header().Del(contentLength)
|
||||
|
||||
// Write the header to gzip response.
|
||||
w.writeHeader()
|
||||
if w.code != 0 {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
// Initialize the GZIP response.
|
||||
w.init()
|
||||
@@ -146,14 +150,6 @@ func (w *GzipResponseWriter) WriteHeader(code int) {
|
||||
w.code = code
|
||||
}
|
||||
|
||||
// writeHeader uses the saved code to send it to the ResponseWriter.
|
||||
func (w *GzipResponseWriter) writeHeader() {
|
||||
if w.code == 0 {
|
||||
w.code = http.StatusOK
|
||||
}
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
|
||||
// init graps a new gzip writer from the gzipWriterPool and writes the correct
|
||||
// content encoding header.
|
||||
func (w *GzipResponseWriter) init() {
|
||||
@@ -166,19 +162,18 @@ func (w *GzipResponseWriter) init() {
|
||||
|
||||
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
|
||||
func (w *GzipResponseWriter) Close() error {
|
||||
// Buffer not nil means the regular response must be returned.
|
||||
if w.buf != nil {
|
||||
w.writeHeader()
|
||||
// Make the write into the regular response.
|
||||
_, writeErr := w.ResponseWriter.Write(w.buf)
|
||||
// Returns the error if any at write.
|
||||
if writeErr != nil {
|
||||
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// If the GZIP responseWriter is not set no needs to close it.
|
||||
if w.gw == nil {
|
||||
// Gzip not trigged yet, write out regular response.
|
||||
if w.code != 0 {
|
||||
w.ResponseWriter.WriteHeader(w.code)
|
||||
}
|
||||
if w.buf != nil {
|
||||
_, writeErr := w.ResponseWriter.Write(w.buf)
|
||||
// Returns the error if any at write.
|
||||
if writeErr != nil {
|
||||
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -236,12 +231,22 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
|
||||
// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller
|
||||
// specify the minimum size before compression.
|
||||
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
|
||||
return NewGzipHandler(level, minSize, &GzipResponseWriter{})
|
||||
}
|
||||
|
||||
// NewGzipHandler behave as NewGzipLevelHandler except it let the caller
|
||||
// specify the minimum size before compression and a GzipWriter.
|
||||
func NewGzipHandler(level, minSize int, gw GzipWriter) (func(http.Handler) http.Handler, error) {
|
||||
if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
|
||||
return nil, fmt.Errorf("invalid compression level requested: %d", level)
|
||||
}
|
||||
if minSize < 0 {
|
||||
return nil, fmt.Errorf("minimum size must be more than zero")
|
||||
return nil, errors.New("minimum size must be more than zero")
|
||||
}
|
||||
if gw == nil {
|
||||
return nil, errors.New("the GzipWriter must be defined")
|
||||
}
|
||||
|
||||
return func(h http.Handler) http.Handler {
|
||||
index := poolIndex(level)
|
||||
|
||||
@@ -249,13 +254,9 @@ func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler
|
||||
w.Header().Add(vary, acceptEncoding)
|
||||
|
||||
if acceptsGzip(r) {
|
||||
gw := &GzipResponseWriter{
|
||||
ResponseWriter: w,
|
||||
index: index,
|
||||
minSize: minSize,
|
||||
|
||||
buf: []byte{},
|
||||
}
|
||||
gw.SetResponseWriter(w)
|
||||
gw.setIndex(index)
|
||||
gw.setMinSize(minSize)
|
||||
defer gw.Close()
|
||||
|
||||
h.ServeHTTP(gw, r)
|
||||
|
||||
58 vendor/github.com/NYTimes/gziphandler/wrapper.go (generated, vendored, Normal file)
@@ -0,0 +1,58 @@
|
||||
package gziphandler
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"net"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
const (
|
||||
contentEncodingHeader = "Content-Encoding"
|
||||
)
|
||||
|
||||
// ----------
|
||||
|
||||
// http.ResponseWriter
|
||||
// http.Hijacker
|
||||
type GzipWriter interface {
|
||||
Header() http.Header
|
||||
Write([]byte) (int, error)
|
||||
WriteHeader(int)
|
||||
Hijack() (net.Conn, *bufio.ReadWriter, error)
|
||||
Close() error
|
||||
SetResponseWriter(http.ResponseWriter)
|
||||
setIndex(int)
|
||||
setMinSize(int)
|
||||
}
|
||||
|
||||
func (w *GzipResponseWriter) SetResponseWriter(rw http.ResponseWriter) {
|
||||
w.ResponseWriter = rw
|
||||
}
|
||||
|
||||
func (w *GzipResponseWriter) setIndex(index int) {
|
||||
w.index = index
|
||||
}
|
||||
|
||||
func (w *GzipResponseWriter) setMinSize(minSize int) {
|
||||
w.minSize = minSize
|
||||
}
|
||||
|
||||
// --------
|
||||
|
||||
type GzipResponseWriterWrapper struct {
|
||||
GzipResponseWriter
|
||||
}
|
||||
|
||||
func (g *GzipResponseWriterWrapper) Write(b []byte) (int, error) {
|
||||
if g.gw == nil && isEncoded(g.Header()) {
|
||||
return g.ResponseWriter.Write(b)
|
||||
}
|
||||
return g.GzipResponseWriter.Write(b)
|
||||
}
|
||||
|
||||
func isEncoded(headers http.Header) bool {
|
||||
header := headers.Get(contentEncodingHeader)
|
||||
// According to https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding,
|
||||
// content is not encoded if the header 'Content-Encoding' is empty or equals to 'identity'.
|
||||
return header != "" && header != "identity"
|
||||
}
|
||||
22 vendor/github.com/donovanhide/eventsource/server.go (generated, vendored)
@@ -4,6 +4,7 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type subscription struct {
|
||||
@@ -32,6 +33,8 @@ type Server struct {
|
||||
subs chan *subscription
|
||||
unregister chan *subscription
|
||||
quit chan bool
|
||||
isClosed bool
|
||||
isClosedMutex sync.RWMutex
|
||||
}
|
||||
|
||||
// Create a new Server ready for handler creation and publishing events
|
||||
@@ -51,6 +54,7 @@ func NewServer() *Server {
|
||||
// Stop handling publishing
|
||||
func (srv *Server) Close() {
|
||||
srv.quit <- true
|
||||
srv.markServerClosed()
|
||||
}
|
||||
|
||||
// Create a new handler for serving a specified channel
|
||||
@@ -69,6 +73,12 @@ func (srv *Server) Handler(channel string) http.HandlerFunc {
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// If the Handler is still active even though the server is closed, stop here.
|
||||
// Otherwise the Handler will block while publishing to srv.subs indefinitely.
|
||||
if srv.isServerClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
sub := &subscription{
|
||||
channel: channel,
|
||||
lastEventId: req.Header.Get("Last-Event-ID"),
|
||||
@@ -165,3 +175,15 @@ func (srv *Server) run() {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (srv *Server) isServerClosed() bool {
|
||||
srv.isClosedMutex.RLock()
|
||||
defer srv.isClosedMutex.RUnlock()
|
||||
return srv.isClosed
|
||||
}
|
||||
|
||||
func (srv *Server) markServerClosed() {
|
||||
srv.isClosedMutex.Lock()
|
||||
defer srv.isClosedMutex.Unlock()
|
||||
srv.isClosed = true
|
||||
}
|
||||
|
||||
63 vendor/github.com/donovanhide/eventsource/stream.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -27,6 +28,10 @@ type Stream struct {
|
||||
Errors chan error
|
||||
// Logger is a logger that, when set, will be used for logging debug messages
|
||||
Logger *log.Logger
|
||||
// isClosed is a marker that the stream is/should be closed
|
||||
isClosed bool
|
||||
// isClosedMutex is a mutex protecting concurrent read/write access of isClosed
|
||||
isClosedMutex sync.RWMutex
|
||||
}
|
||||
|
||||
type SubscriptionError struct {
|
||||
@@ -61,7 +66,7 @@ func SubscribeWith(lastEventId string, client *http.Client, request *http.Reques
|
||||
c: client,
|
||||
req: request,
|
||||
lastEventId: lastEventId,
|
||||
retry: (time.Millisecond * 3000),
|
||||
retry: time.Millisecond * 3000,
|
||||
Events: make(chan Event),
|
||||
Errors: make(chan error),
|
||||
}
|
||||
@@ -75,6 +80,29 @@ func SubscribeWith(lastEventId string, client *http.Client, request *http.Reques
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// Close will close the stream. It is safe for concurrent access and can be called multiple times.
|
||||
func (stream *Stream) Close() {
|
||||
if stream.isStreamClosed() {
|
||||
return
|
||||
}
|
||||
|
||||
stream.markStreamClosed()
|
||||
close(stream.Errors)
|
||||
close(stream.Events)
|
||||
}
|
||||
|
||||
func (stream *Stream) isStreamClosed() bool {
|
||||
stream.isClosedMutex.RLock()
|
||||
defer stream.isClosedMutex.RUnlock()
|
||||
return stream.isClosed
|
||||
}
|
||||
|
||||
func (stream *Stream) markStreamClosed() {
|
||||
stream.isClosedMutex.Lock()
|
||||
defer stream.isClosedMutex.Unlock()
|
||||
stream.isClosed = true
|
||||
}
|
||||
|
||||
// Go's http package doesn't copy headers across when it encounters
|
||||
// redirects so we need to do that manually.
|
||||
func checkRedirect(req *http.Request, via []*http.Request) error {
|
||||
@@ -112,15 +140,27 @@ func (stream *Stream) connect() (r io.ReadCloser, err error) {
|
||||
|
||||
func (stream *Stream) stream(r io.ReadCloser) {
|
||||
defer r.Close()
|
||||
|
||||
// receives events until an error is encountered
|
||||
stream.receiveEvents(r)
|
||||
|
||||
// tries to reconnect and start the stream again
|
||||
stream.retryRestartStream()
|
||||
}
|
||||
|
||||
func (stream *Stream) receiveEvents(r io.ReadCloser) {
|
||||
dec := NewDecoder(r)
|
||||
|
||||
for {
|
||||
ev, err := dec.Decode()
|
||||
|
||||
if stream.isStreamClosed() {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
stream.Errors <- err
|
||||
// respond to all errors by reconnecting and trying again
|
||||
break
|
||||
return
|
||||
}
|
||||
|
||||
pub := ev.(*publication)
|
||||
if pub.Retry() > 0 {
|
||||
stream.retry = time.Duration(pub.Retry()) * time.Millisecond
|
||||
@@ -130,20 +170,25 @@ func (stream *Stream) stream(r io.ReadCloser) {
|
||||
}
|
||||
stream.Events <- ev
|
||||
}
|
||||
}
|
||||
|
||||
func (stream *Stream) retryRestartStream() {
|
||||
backoff := stream.retry
|
||||
for {
|
||||
time.Sleep(backoff)
|
||||
if stream.Logger != nil {
|
||||
stream.Logger.Printf("Reconnecting in %0.4f secs\n", backoff.Seconds())
|
||||
}
|
||||
|
||||
time.Sleep(backoff)
|
||||
if stream.isStreamClosed() {
|
||||
return
|
||||
}
|
||||
// NOTE: because of the defer we're opening the new connection
|
||||
// before closing the old one. Shouldn't be a problem in practice,
|
||||
// but something to be aware of.
|
||||
next, err := stream.connect()
|
||||
r, err := stream.connect()
|
||||
if err == nil {
|
||||
go stream.stream(next)
|
||||
break
|
||||
go stream.stream(r)
|
||||
return
|
||||
}
|
||||
stream.Errors <- err
|
||||
backoff *= 2
|
||||
|
||||
15 vendor/github.com/gambol99/go-marathon/client.go (generated, vendored)
@@ -150,8 +150,6 @@ type Marathon interface {
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrInvalidResponse is thrown when marathon responds with invalid or error response
|
||||
ErrInvalidResponse = errors.New("invalid response from Marathon")
|
||||
// ErrMarathonDown is thrown when all the marathon endpoints are down
|
||||
ErrMarathonDown = errors.New("all the Marathon hosts are presently down")
|
||||
// ErrTimeoutError is thrown when the operation has timed out
|
||||
@@ -190,6 +188,11 @@ type httpClient struct {
|
||||
config Config
|
||||
}
|
||||
|
||||
// newRequestError signals that creating a new http.Request failed
|
||||
type newRequestError struct {
|
||||
error
|
||||
}
|
||||
|
||||
// NewClient creates a new marathon client
|
||||
// config: the configuration to use
|
||||
func NewClient(config Config) (Marathon, error) {
|
||||
@@ -298,8 +301,7 @@ func (r *marathonClient) apiCall(method, path string, body, result interface{})
|
||||
if response.StatusCode >= 200 && response.StatusCode <= 299 {
|
||||
if result != nil {
|
||||
if err := json.Unmarshal(respBody, result); err != nil {
|
||||
r.debugLog.Printf("apiCall(): failed to unmarshall the response from marathon, error: %s\n", err)
|
||||
return ErrInvalidResponse
|
||||
return fmt.Errorf("failed to unmarshal response from Marathon: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -317,7 +319,8 @@ func (r *marathonClient) apiCall(method, path string, body, result interface{})
|
||||
}
|
||||
}
|
||||
|
||||
// buildAPIRequest creates a default API request
|
||||
// buildAPIRequest creates a default API request.
|
||||
// It fails when there is no available member in the cluster anymore or when the request can not be built.
|
||||
func (r *marathonClient) buildAPIRequest(method, path string, reader io.Reader) (request *http.Request, member string, err error) {
|
||||
// Grab a member from the cluster
|
||||
member, err = r.hosts.getMember()
|
||||
@@ -328,7 +331,7 @@ func (r *marathonClient) buildAPIRequest(method, path string, reader io.Reader)
|
||||
// Build the HTTP request to Marathon
|
||||
request, err = r.client.buildMarathonRequest(method, member, path, reader)
|
||||
if err != nil {
|
||||
return nil, member, err
|
||||
return nil, member, newRequestError{err}
|
||||
}
|
||||
return request, member, nil
|
||||
}
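The new newRequestError type embeds the error it wraps, so Error() still returns the original message while callers can single out "the request could not be built" with a type switch (connectToSSE in subscription.go below relies on exactly that). The same pattern in isolation, with hypothetical names:

```go
package main

import (
    "errors"
    "fmt"
)

// buildError mirrors newRequestError: embedding keeps the wrapped message
// while giving callers a distinct concrete type to switch on.
type buildError struct {
    error
}

func build() error {
    return buildError{errors.New("missing URL scheme")}
}

func main() {
    err := build()
    switch err.(type) {
    case buildError:
        fmt.Println("request could not be built:", err)
    default:
        fmt.Println("other failure:", err)
    }
}
```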

vendor/github.com/gambol99/go-marathon/group.go (8 changes; generated, vendored)

@@ -209,7 +209,9 @@ func (r *marathonClient) WaitOnGroup(name string, timeout time.Duration) error {
func (r *marathonClient) DeleteGroup(name string, force bool) (*DeploymentID, error) {
    version := new(DeploymentID)
    path := fmt.Sprintf("%s/%s", marathonAPIGroups, trimRootPath(name))
    path = buildPathWithForceParam(path, force)
    if force {
        path += "?force=true"
    }
    if err := r.apiDelete(path, nil, version); err != nil {
        return nil, err
    }
@@ -224,7 +226,9 @@ func (r *marathonClient) DeleteGroup(name string, force bool) (*DeploymentID, er
func (r *marathonClient) UpdateGroup(name string, group *Group, force bool) (*DeploymentID, error) {
    deploymentID := new(DeploymentID)
    path := fmt.Sprintf("%s/%s", marathonAPIGroups, trimRootPath(name))
    path = buildPathWithForceParam(path, force)
    if force {
        path += "?force=true"
    }
    if err := r.apiPut(path, group, deploymentID); err != nil {
        return nil, err
    }
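Both group endpoints now route the force flag through buildPathWithForceParam instead of appending the query string inline. The helper's body is not part of this hunk; judging from the code it replaces, it presumably amounts to something like this sketch:

```go
// Hypothetical reconstruction of the helper referenced above; the actual
// implementation lives elsewhere in go-marathon and is not shown in this diff.
func buildPathWithForceParam(path string, force bool) string {
    if force {
        path += "?force=true"
    }
    return path
}
```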

vendor/github.com/gambol99/go-marathon/health.go (2 changes; generated, vendored)

@@ -27,7 +27,7 @@ type HealthCheck struct {
    GracePeriodSeconds int `json:"gracePeriodSeconds,omitempty"`
    IntervalSeconds int `json:"intervalSeconds,omitempty"`
    TimeoutSeconds int `json:"timeoutSeconds,omitempty"`
    IgnoreHTTP1xx *bool `json:"ignoreHttp1xx,ommitempty"`
    IgnoreHTTP1xx *bool `json:"ignoreHttp1xx,omitempty"`
}

// SetCommand sets the given command on the health check.
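The only change in this file fixes the misspelled `ommitempty` tag option. encoding/json silently ignores options it does not recognize, so before the fix a nil IgnoreHTTP1xx was still serialized as `"ignoreHttp1xx":null`; with a proper `omitempty` the field is dropped. A small self-contained illustration, with the struct trimmed to the one relevant field:

```go
package main

import (
    "encoding/json"
    "fmt"
)

type withTypo struct {
    IgnoreHTTP1xx *bool `json:"ignoreHttp1xx,ommitempty"` // unknown option: effectively no omitempty
}

type withFix struct {
    IgnoreHTTP1xx *bool `json:"ignoreHttp1xx,omitempty"`
}

func main() {
    a, _ := json.Marshal(withTypo{}) // {"ignoreHttp1xx":null}
    b, _ := json.Marshal(withFix{})  // {}
    fmt.Println(string(a), string(b))
}
```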

vendor/github.com/gambol99/go-marathon/subscription.go (92 changes; generated, vendored)

@@ -103,7 +103,8 @@ func (r *marathonClient) registerSubscription() error {
    case EventsTransportCallback:
        return r.registerCallbackSubscription()
    case EventsTransportSSE:
        return r.registerSSESubscription()
        r.registerSSESubscription()
        return nil
    default:
        return fmt.Errorf("the events transport: %d is not supported", r.config.EventsTransport)
    }
@@ -162,40 +163,81 @@ func (r *marathonClient) registerCallbackSubscription() error {
    return nil
}

func (r *marathonClient) registerSSESubscription() error {
    // Prevent multiple SSE subscriptions
// registerSSESubscription starts a go routine that continously tries to
// connect to the SSE stream and to process the received events. To establish
// the connection it tries the active cluster members until no more member is
// active. When this happens it will retry to get a connection every 5 seconds.
func (r *marathonClient) registerSSESubscription() {
    if r.subscribedToSSE {
        return nil
    }

    request, _, err := r.buildAPIRequest("GET", marathonAPIEventStream, nil)
    if err != nil {
        return err
    }

    // Try to connect to stream, reusing the http client settings
    stream, err := eventsource.SubscribeWith("", r.config.HTTPClient, request)
    if err != nil {
        return err
        return
    }

    go func() {
        for {
            select {
            case ev := <-stream.Events:
                if err := r.handleEvent(ev.Data()); err != nil {
                    // TODO let the user handle this error instead of logging it here
                    r.debugLog.Printf("registerSSESubscription(): failed to handle event: %v\n", err)
                }
            case err := <-stream.Errors:
                // TODO let the user handle this error instead of logging it here
                r.debugLog.Printf("registerSSESubscription(): failed to receive event: %v\n", err)
            stream, err := r.connectToSSE()
            if err != nil {
                r.debugLog.Printf("Error connecting SSE subscription: %s", err)
                <-time.After(5 * time.Second)
                continue
            }

            err = r.listenToSSE(stream)
            stream.Close()
            r.debugLog.Printf("Error on SSE subscription: %s", err)
        }
    }()

    r.subscribedToSSE = true
    return nil
}

// connectToSSE tries to establish an *eventsource.Stream to any of the Marathon cluster members, marking the
// member as down on connection failure, until there is no more active member in the cluster.
// Given the http request can not be built, it will panic as this case should never happen.
func (r *marathonClient) connectToSSE() (*eventsource.Stream, error) {
    for {
        request, member, err := r.buildAPIRequest("GET", marathonAPIEventStream, nil)
        if err != nil {
            switch err.(type) {
            case newRequestError:
                panic(fmt.Sprintf("Requests for SSE subscriptions should never fail to be created: %s", err.Error()))
            default:
                return nil, err
            }
        }

        // The event source library manipulates the HTTPClient. So we create a new one and copy
        // its underlying fields for performance reasons. See note that at least the Transport
        // should be reused here: https://golang.org/pkg/net/http/#Client
        httpClient := &http.Client{
            Transport: r.config.HTTPClient.Transport,
            CheckRedirect: r.config.HTTPClient.CheckRedirect,
            Jar: r.config.HTTPClient.Jar,
            Timeout: r.config.HTTPClient.Timeout,
        }

        stream, err := eventsource.SubscribeWith("", httpClient, request)
        if err != nil {
            r.debugLog.Printf("Error subscribing to Marathon event stream: %s", err)
            r.hosts.markDown(member)
            continue
        }

        return stream, nil
    }
}

func (r *marathonClient) listenToSSE(stream *eventsource.Stream) error {
    for {
        select {
        case ev := <-stream.Events:
            if err := r.handleEvent(ev.Data()); err != nil {
                r.debugLog.Printf("listenToSSE(): failed to handle event: %v", err)
            }
        case err := <-stream.Errors:
            return err

        }
    }
}

// Subscribe adds a URL to Marathon's callback facility
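The comment in connectToSSE is the key detail of this rework: the eventsource library mutates the client it is handed, so a fresh http.Client is assembled per connection attempt while the Transport, and with it the keep-alive connection pool, is shared. The same idea in isolation, as a self-contained sketch:

```go
package main

import (
    "fmt"
    "net/http"
    "time"
)

func main() {
    base := &http.Client{Timeout: 30 * time.Second, Transport: http.DefaultTransport}

    // Shallow per-use copy: the new client can be modified freely by a library,
    // while the Transport (and its connection pool) keeps being reused.
    perStream := &http.Client{
        Transport:     base.Transport,
        CheckRedirect: base.CheckRedirect,
        Jar:           base.Jar,
        Timeout:       base.Timeout,
    }
    fmt.Println(perStream.Transport == http.DefaultTransport) // true: same connection pool
}
```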

vendor/github.com/gambol99/go-marathon/unreachable_strategy.go (42 changes; generated, vendored)

@@ -16,12 +16,54 @@ limitations under the License.

package marathon

import (
    "encoding/json"
    "fmt"
)

const UnreachableStrategyAbsenceReasonDisabled = "disabled"

// UnreachableStrategy is the unreachable strategy applied to an application.
type UnreachableStrategy struct {
    EnabledUnreachableStrategy
    AbsenceReason string
}

// EnabledUnreachableStrategy covers parameters pertaining to present unreachable strategies.
type EnabledUnreachableStrategy struct {
    InactiveAfterSeconds *float64 `json:"inactiveAfterSeconds,omitempty"`
    ExpungeAfterSeconds *float64 `json:"expungeAfterSeconds,omitempty"`
}

type unreachableStrategy UnreachableStrategy

// UnmarshalJSON unmarshals the given JSON into an UnreachableStrategy. It
// populates parameters for present strategies, and otherwise only sets the
// absence reason.
func (us *UnreachableStrategy) UnmarshalJSON(b []byte) error {
    var u unreachableStrategy
    var errEnabledUS, errNonEnabledUS error
    if errEnabledUS = json.Unmarshal(b, &u); errEnabledUS == nil {
        *us = UnreachableStrategy(u)
        return nil
    }

    if errNonEnabledUS = json.Unmarshal(b, &us.AbsenceReason); errNonEnabledUS == nil {
        return nil
    }

    return fmt.Errorf("failed to unmarshal unreachable strategy: unmarshaling into enabled returned error '%s'; unmarshaling into non-enabled returned error '%s'", errEnabledUS, errNonEnabledUS)
}

// MarshalJSON marshals the unreachable strategy.
func (us *UnreachableStrategy) MarshalJSON() ([]byte, error) {
    if us.AbsenceReason == "" {
        return json.Marshal(us.EnabledUnreachableStrategy)
    }

    return json.Marshal(us.AbsenceReason)
}

// SetInactiveAfterSeconds sets the period after which instance will be marked as inactive.
func (us UnreachableStrategy) SetInactiveAfterSeconds(cap float64) UnreachableStrategy {
    us.InactiveAfterSeconds = &cap
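The custom UnmarshalJSON above first tries the object form and only then falls back to a bare string, which carries the absence reason (the constant suggests "disabled"). A trimmed-down, self-contained sketch of the two payload shapes it accepts; the type names here are illustrative copies, not the vendored ones:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Illustrative copies of the vendored types, reduced to the relevant fields.
type enabledStrategy struct {
    InactiveAfterSeconds float64 `json:"inactiveAfterSeconds"`
    ExpungeAfterSeconds  float64 `json:"expungeAfterSeconds"`
}

type strategy struct {
    enabledStrategy
    AbsenceReason string
}

func (s *strategy) UnmarshalJSON(b []byte) error {
    var e enabledStrategy
    if err := json.Unmarshal(b, &e); err == nil { // object form
        s.enabledStrategy = e
        return nil
    }
    return json.Unmarshal(b, &s.AbsenceReason) // bare string form, e.g. "disabled"
}

func main() {
    var object, disabled strategy
    _ = json.Unmarshal([]byte(`{"inactiveAfterSeconds": 300, "expungeAfterSeconds": 600}`), &object)
    _ = json.Unmarshal([]byte(`"disabled"`), &disabled)
    fmt.Printf("%+v\n%+v\n", object, disabled)
}
```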

vendor/github.com/vulcand/oxy/forward/fwd.go (121 changes; generated, vendored)

@@ -6,15 +6,14 @@ package forward

import (
    "crypto/tls"
    "io"
    "net"
    "net/http"
    "net/url"
    "os"
    "reflect"
    "strconv"
    "strings"
    "time"

    "github.com/gorilla/websocket"
    "github.com/vulcand/oxy/utils"
)

@@ -158,7 +157,9 @@ func (f *Forwarder) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// serveHTTP forwards HTTP traffic using the configured transport
func (f *httpForwarder) serveHTTP(w http.ResponseWriter, req *http.Request, ctx *handlerContext) {
    start := time.Now().UTC()

    response, err := f.roundTripper.RoundTrip(f.copyRequest(req, req.URL))

    if err != nil {
        ctx.log.Errorf("Error forwarding to %v, err: %v", req.URL, err)
        ctx.errHandler.ServeHTTP(w, req, err)
@@ -168,6 +169,16 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, req *http.Request, ctx
    utils.CopyHeaders(w.Header(), response.Header)
    // Remove hop-by-hop headers.
    utils.RemoveHeaders(w.Header(), HopHeaders...)

    announcedTrailerKeyCount := len(response.Trailer)
    if announcedTrailerKeyCount > 0 {
        trailerKeys := make([]string, 0, announcedTrailerKeyCount)
        for k := range response.Trailer {
            trailerKeys = append(trailerKeys, k)
        }
        w.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
    }

    w.WriteHeader(response.StatusCode)

    stream := f.streamResponse
@@ -178,6 +189,20 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, req *http.Request, ctx
        }
    }
    written, err := io.Copy(newResponseFlusher(w, stream), response.Body)
    if err != nil {
        ctx.log.Errorf("Error copying upstream response body: %v", err)
        ctx.errHandler.ServeHTTP(w, req, err)
        return
    }

    defer response.Body.Close()

    forceSetTrailers := len(response.Trailer) != announcedTrailerKeyCount
    shallowCopyTrailers(w.Header(), response.Trailer, forceSetTrailers)

    if written != 0 {
        w.Header().Set(ContentLength, strconv.FormatInt(written, 10))
    }

    if req.TLS != nil {
        ctx.log.Infof("Round trip: %v, code: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v",
@@ -191,17 +216,6 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, req *http.Request, ctx
            req.URL, response.StatusCode, time.Now().UTC().Sub(start))
    }

    defer response.Body.Close()

    if err != nil {
        ctx.log.Errorf("Error copying upstream response Body: %v", err)
        ctx.errHandler.ServeHTTP(w, req, err)
        return
    }

    if written != 0 {
        w.Header().Set(ContentLength, strconv.FormatInt(written, 10))
    }
}

// copyRequest makes a copy of the specified request to be sent using the configured
@@ -239,65 +253,47 @@ func (f *httpForwarder) copyRequest(req *http.Request, u *url.URL) *http.Request
// serveHTTP forwards websocket traffic
func (f *websocketForwarder) serveHTTP(w http.ResponseWriter, req *http.Request, ctx *handlerContext) {
    outReq := f.copyRequest(req, req.URL)
    host := outReq.URL.Host
    dial := net.Dial

    // if host does not specify a port, use the default http port
    if !strings.Contains(host, ":") {
        if outReq.URL.Scheme == "wss" {
            host = host + ":443"
        } else {
            host = host + ":80"
        }
    dialer := websocket.DefaultDialer
    if outReq.URL.Scheme == "wss" && f.TLSClientConfig != nil {
        dialer.TLSClientConfig = f.TLSClientConfig
    }

    if outReq.URL.Scheme == "wss" {
        if f.TLSClientConfig == nil {
            f.TLSClientConfig = http.DefaultTransport.(*http.Transport).TLSClientConfig
        }
        dial = func(network, address string) (net.Conn, error) {
            return tls.Dial("tcp", host, f.TLSClientConfig)
        }
    }

    targetConn, err := dial("tcp", host)
    targetConn, resp, err := dialer.Dial(outReq.URL.String(), outReq.Header)
    if err != nil {
        ctx.log.Errorf("Error dialing `%v`: %v", host, err)
        ctx.log.Errorf("Error dialing `%v`: %v", outReq.Host, err)
        ctx.errHandler.ServeHTTP(w, req, err)
        return
    }
    hijacker, ok := w.(http.Hijacker)
    if !ok {
        ctx.log.Errorf("Unable to hijack the connection: %v", reflect.TypeOf(w))
        ctx.errHandler.ServeHTTP(w, req, nil)
        return
    }
    underlyingConn, _, err := hijacker.Hijack()
    upgrader := websocket.Upgrader{}
    utils.RemoveHeaders(resp.Header, WebsocketUpgradeHeaders...)
    underlyingConn, err := upgrader.Upgrade(w, req, resp.Header)
    if err != nil {
        ctx.log.Errorf("Unable to hijack the connection: %v %v", reflect.TypeOf(w), err)
        ctx.errHandler.ServeHTTP(w, req, err)
        ctx.log.Errorf("Error while upgrading connection : %v", err)
        return
    }
    // it is now caller's responsibility to Close the underlying connection
    defer underlyingConn.Close()
    defer targetConn.Close()

    ctx.log.Infof("Writing outgoing Websocket request to target connection: %+v", outReq)

    // write the modified incoming request to the dialed connection
    if err = outReq.Write(targetConn); err != nil {
        ctx.log.Errorf("Unable to copy request to target: %v", err)
        ctx.errHandler.ServeHTTP(w, req, err)
        return
    }
    errc := make(chan error, 2)
    replicate := func(dst io.Writer, src io.Reader) {
        _, err := io.Copy(dst, src)
        errc <- err
    }
    go replicate(targetConn, underlyingConn)
    go replicate(underlyingConn, targetConn)

    go replicate(targetConn.UnderlyingConn(), underlyingConn.UnderlyingConn())

    // Try to read the first message
    t, msg, err := targetConn.ReadMessage()
    if err != nil {
        ctx.log.Errorf("Couldn't read first message : %v", err)
    } else {
        underlyingConn.WriteMessage(t, msg)
    }

    go replicate(underlyingConn.UnderlyingConn(), targetConn.UnderlyingConn())
    <-errc

}

// copyRequest makes a copy of the specified request.
@@ -307,6 +303,7 @@ func (f *websocketForwarder) copyRequest(req *http.Request, u *url.URL) (outReq

    outReq.URL = utils.CopyURL(req.URL)
    outReq.URL.Scheme = u.Scheme
    outReq.URL.Path = outReq.RequestURI

    //sometimes backends might be registered as HTTP/HTTPS servers so translate URLs to websocket URLs.
    switch u.Scheme {
@@ -317,19 +314,12 @@ func (f *websocketForwarder) copyRequest(req *http.Request, u *url.URL) (outReq
    }

    outReq.URL.Host = u.Host
    outReq.URL.Opaque = req.RequestURI
    // raw query is already included in RequestURI, so ignore it to avoid dupes
    outReq.URL.RawQuery = ""

    outReq.Proto = "HTTP/1.1"
    outReq.ProtoMajor = 1
    outReq.ProtoMinor = 1

    // Overwrite close flag so we can keep persistent connection for the backend servers
    outReq.Close = false

    outReq.Header = make(http.Header)
    utils.CopyHeaders(outReq.Header, req.Header)
    utils.RemoveHeaders(outReq.Header, WebsocketDialHeaders...)

    if f.rewriter != nil {
        f.rewriter.Rewrite(outReq)
@@ -351,3 +341,12 @@ func isWebsocketRequest(req *http.Request) bool {
    }
    return containsHeader(Connection, "upgrade") && containsHeader(Upgrade, "websocket")
}

func shallowCopyTrailers(dstHeader, srcTrailer http.Header, forceSetTrailers bool) {
    for k, vv := range srcTrailer {
        if forceSetTrailers {
            k = http.TrailerPrefix + k
        }
        dstHeader[k] = vv
    }
}
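The hunk boundary in copyRequest cuts off the body of the scheme-translation switch mentioned in the comment above; presumably it maps http to ws and https to wss, roughly like this sketch:

```go
// Hypothetical reconstruction of the elided switch cases in copyRequest;
// backends registered as HTTP/HTTPS are addressed over ws/wss instead.
switch u.Scheme {
case "https":
    outReq.URL.Scheme = "wss"
case "http":
    outReq.URL.Scheme = "ws"
}
```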

vendor/github.com/vulcand/oxy/forward/headers.go (49 changes; generated, vendored)

@@ -1,20 +1,25 @@
package forward

const (
    XForwardedProto = "X-Forwarded-Proto"
    XForwardedFor = "X-Forwarded-For"
    XForwardedHost = "X-Forwarded-Host"
    XForwardedServer = "X-Forwarded-Server"
    Connection = "Connection"
    KeepAlive = "Keep-Alive"
    ProxyAuthenticate = "Proxy-Authenticate"
    ProxyAuthorization = "Proxy-Authorization"
    Te = "Te" // canonicalized version of "TE"
    Trailers = "Trailers"
    TransferEncoding = "Transfer-Encoding"
    Upgrade = "Upgrade"
    ContentLength = "Content-Length"
    ContentType = "Content-Type"
    XForwardedProto = "X-Forwarded-Proto"
    XForwardedFor = "X-Forwarded-For"
    XForwardedHost = "X-Forwarded-Host"
    XForwardedServer = "X-Forwarded-Server"
    Connection = "Connection"
    KeepAlive = "Keep-Alive"
    ProxyAuthenticate = "Proxy-Authenticate"
    ProxyAuthorization = "Proxy-Authorization"
    Te = "Te" // canonicalized version of "TE"
    Trailers = "Trailers"
    TransferEncoding = "Transfer-Encoding"
    Upgrade = "Upgrade"
    ContentLength = "Content-Length"
    ContentType = "Content-Type"
    SecWebsocketKey = "Sec-Websocket-Key"
    SecWebsocketVersion = "Sec-Websocket-Version"
    SecWebsocketExtensions = "Sec-Websocket-Extensions"
    SecWebsocketProtocol = "Sec-Websocket-Protocol"
    SecWebsocketAccept = "Sec-Websocket-Accept"
)

// Hop-by-hop headers. These are removed when sent to the backend.
@@ -30,3 +35,19 @@ var HopHeaders = []string{
    TransferEncoding,
    Upgrade,
}

var WebsocketDialHeaders = []string{
    Upgrade,
    Connection,
    SecWebsocketKey,
    SecWebsocketVersion,
    SecWebsocketExtensions,
    SecWebsocketProtocol,
    SecWebsocketAccept,
}

var WebsocketUpgradeHeaders = []string{
    Upgrade,
    Connection,
    SecWebsocketAccept,
}