forked from SW/traefik
Compare commits
37 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 422109b82f | |
| | c864a7297b | |
| | 8da038041d | |
| | dd954f3c0a | |
| | db483e9d34 | |
| | 700b7a1b51 | |
| | ed65d00574 | |
| | f460c1990e | |
| | 83381e99cf | |
| | 31550fd2c9 | |
| | ba046b4d3a | |
| | d675d46930 | |
| | 7ea76929d4 | |
| | f98c537ec2 | |
| | 083bde64ee | |
| | 45fe218ee2 | |
| | d54777236c | |
| | 4f3b06472b | |
| | 52bad03c8d | |
| | 2fde3e8679 | |
| | 1e71f52b72 | |
| | 2b1d2853cd | |
| | f07e8f58e6 | |
| | 7b19cb5631 | |
| | dbd173b4e4 | |
| | 85cfd87c44 | |
| | c867f48f11 | |
| | 514f9a7215 | |
| | 0b0380b690 | |
| | 4d0c8c189a | |
| | afe4c307f9 | |
| | ce3a0fdd46 | |
| | 203a5c5c48 | |
| | be4aeaacde | |
| | 26dc2f4d61 | |
| | 6aac78fc36 | |
| | f6c53f0450 | |
2  .github/ISSUE_TEMPLATE.md (vendored)

@@ -22,7 +22,7 @@ If you intend to ask a support question: DO NOT FILE AN ISSUE.

HOW TO WRITE A GOOD ISSUE?

- Respect the issue template as more as possible.
- Respect the issue template as much as possible.
- If it's possible use the command `traefik bug`. See https://www.youtube.com/watch?v=Lyz62L8m93I.
- The title must be short and descriptive.
- Explain the conditions which led you to write this issue: the context.

68  .github/ISSUE_TEMPLATE/bugs.md (vendored, new file)

@@ -0,0 +1,68 @@
<!--
DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS.

The issue tracker is for reporting bugs and feature requests only.
For end-user related support questions, refer to one of the following:

- Stack Overflow (using the "traefik" tag): https://stackoverflow.com/questions/tagged/traefik
- the Traefik community Slack channel: https://traefik.herokuapp.com

-->

### Do you want to request a *feature* or report a *bug*?

Bug

### What did you do?

<!--

HOW TO WRITE A GOOD ISSUE?

- Respect the issue template as much as possible.
- If it's possible use the command `traefik bug`. See https://www.youtube.com/watch?v=Lyz62L8m93I.
- The title must be short and descriptive.
- Explain the conditions which led you to write this issue: the context.
- The context should lead to something, an idea or a problem that you’re facing.
- Remain clear and concise.
- Format your messages to help the reader focus on what matters and understand the structure of your message, use Markdown syntax https://help.github.com/articles/github-flavored-markdown

-->

### What did you expect to see?

### What did you see instead?

### Output of `traefik version`: (_What version of Traefik are you using?_)

<!--
For the Traefik Docker image:
    docker run [IMAGE] version
    ex: docker run traefik version
-->

```
(paste your output here)
```

### What is your environment & configuration (arguments, toml, provider, platform, ...)?

```toml
# (paste your configuration here)
```

<!--
Add more configuration information here.
-->

### If applicable, please paste the log output in debug mode (`--debug` switch)

```
(paste your output here)
```

32  .github/ISSUE_TEMPLATE/features.md (vendored, new file)

@@ -0,0 +1,32 @@
<!--
DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS.

The issue tracker is for reporting bugs and feature requests only.
For end-user related support questions, refer to one of the following:

- Stack Overflow (using the "traefik" tag): https://stackoverflow.com/questions/tagged/traefik
- the Traefik community Slack channel: https://traefik.herokuapp.com

-->

### Do you want to request a *feature* or report a *bug*?

Feature

### What did you expect to see?

<!--

HOW TO WRITE A GOOD ISSUE?

- Respect the issue template as much as possible.
- If it's possible use the command `traefik bug`. See https://www.youtube.com/watch?v=Lyz62L8m93I.
- The title must be short and descriptive.
- Explain the conditions which led you to write this issue: the context.
- The context should lead to something, an idea or a problem that you’re facing.
- Remain clear and concise.
- Format your messages to help the reader focus on what matters and understand the structure of your message, use Markdown syntax https://help.github.com/articles/github-flavored-markdown

-->

7  .github/PULL_REQUEST_TEMPLATE/mergeback.md (vendored, new file)

@@ -0,0 +1,7 @@
### What does this PR do?

Merge v{{.Version}} into master

### Motivation

Be sync.

7  .github/PULL_REQUEST_TEMPLATE/release.md (vendored, new file)

@@ -0,0 +1,7 @@
### What does this PR do?

Prepare release v{{.Version}}.

### Motivation

Create a new release.

@@ -24,9 +24,9 @@ before_deploy:
      sudo -E apt-get -yq update;
      sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*;
      docker version;
      make image;
      if [ "$TRAVIS_TAG" ]; then
        make -j${N_MAKE_JOBS} crossbinary-parallel;
        make image-dirty;
        tar cfz dist/traefik-${VERSION}.src.tar.gz --exclude-vcs --exclude dist .;
      fi;
      curl -sI https://github.com/containous/structor/releases/latest | grep -Fi Location | tr -d '\r' | sed "s/tag/download/g" | awk -F " " '{ print $2 "/structor_linux-amd64"}' | wget --output-document=$GOPATH/bin/structor -i -;
43  CHANGELOG.md

@@ -1,5 +1,48 @@
# Change Log

## [v1.5.3](https://github.com/containous/traefik/tree/v1.5.3) (2018-02-27)
[All Commits](https://github.com/containous/traefik/compare/v1.5.2...v1.5.3)

**Bug fixes:**
- **[acme]** Check all the C/N and SANs of provided certificates before generating ACME certificates ([#2913](https://github.com/containous/traefik/pull/2913) by [nmengin](https://github.com/nmengin))
- **[docker/swarm]** Empty IP address when use endpoint mode dnsrr ([#2887](https://github.com/containous/traefik/pull/2887) by [mmatur](https://github.com/mmatur))
- **[middleware]** Infinite entry point redirection. ([#2929](https://github.com/containous/traefik/pull/2929) by [ldez](https://github.com/ldez))
- **[provider]** Isolate backend with same name on different provider ([#2862](https://github.com/containous/traefik/pull/2862) by [Juliens](https://github.com/Juliens))
- **[tls]** Starting Træfik even if TLS certificates are in error ([#2909](https://github.com/containous/traefik/pull/2909) by [nmengin](https://github.com/nmengin))
- **[tls]** Add DEBUG log when no provided certificate can check a domain ([#2938](https://github.com/containous/traefik/pull/2938) by [nmengin](https://github.com/nmengin))
- **[webui]** Smooth dashboard refresh. ([#2871](https://github.com/containous/traefik/pull/2871) by [ldez](https://github.com/ldez))
- Fix Duration JSON unmarshal ([#2935](https://github.com/containous/traefik/pull/2935) by [ldez](https://github.com/ldez))
- Default value for lifecycle ([#2934](https://github.com/containous/traefik/pull/2934) by [Juliens](https://github.com/Juliens))
- Check ping configuration. ([#2852](https://github.com/containous/traefik/pull/2852) by [ldez](https://github.com/ldez))

**Documentation:**
- **[docker]** it's -> its ([#2901](https://github.com/containous/traefik/pull/2901) by [piec](https://github.com/piec))
- **[tls]** Fix doc cipher suites ([#2894](https://github.com/containous/traefik/pull/2894) by [emilevauge](https://github.com/emilevauge))
- Add a CLI help command for Docker. ([#2921](https://github.com/containous/traefik/pull/2921) by [ldez](https://github.com/ldez))
- Fix traffic pronounce dead link ([#2870](https://github.com/containous/traefik/pull/2870) by [emilevauge](https://github.com/emilevauge))
- Update documentation on onHostRule, ping examples, and web deprecation ([#2863](https://github.com/containous/traefik/pull/2863) by [Juliens](https://github.com/Juliens))

## [v1.5.2](https://github.com/containous/traefik/tree/v1.5.2) (2018-02-12)
[All Commits](https://github.com/containous/traefik/compare/v1.5.1...v1.5.2)

**Bug fixes:**
- **[acme,cluster,kv]** Compress ACME certificates in KV stores. ([#2814](https://github.com/containous/traefik/pull/2814) by [nmengin](https://github.com/nmengin))
- **[acme]** Traefik still start when Let's encrypt is down ([#2794](https://github.com/containous/traefik/pull/2794) by [Juliens](https://github.com/Juliens))
- **[docker]** Fix dnsrr endpoint mode excluded when not using swarm LB ([#2795](https://github.com/containous/traefik/pull/2795) by [mmatur](https://github.com/mmatur))
- **[eureka]** Continue refresh the configuration after a failure. ([#2838](https://github.com/containous/traefik/pull/2838) by [ldez](https://github.com/ldez))
- **[logs]** Reduce oxy round trip logs to debug. ([#2821](https://github.com/containous/traefik/pull/2821) by [timoreimann](https://github.com/timoreimann))
- **[websocket]** Fix goroutine leaks in websocket ([#2825](https://github.com/containous/traefik/pull/2825) by [Juliens](https://github.com/Juliens))
- Hide the pflag error when displaying help. ([#2800](https://github.com/containous/traefik/pull/2800) by [ldez](https://github.com/ldez))

**Documentation:**
- **[docker]** Explain how to write entrypoints definition in a compose file ([#2834](https://github.com/containous/traefik/pull/2834) by [mmatur](https://github.com/mmatur))
- **[docker]** Fix typo ([#2813](https://github.com/containous/traefik/pull/2813) by [uschtwill](https://github.com/uschtwill))
- **[k8s]** typo in "i"ngress annotations. ([#2780](https://github.com/containous/traefik/pull/2780) by [RRAlex](https://github.com/RRAlex))
- Clarify how setting a frontend priority works ([#2818](https://github.com/containous/traefik/pull/2818) by [sirlatrom](https://github.com/sirlatrom))
- Fixed typo. ([#2811](https://github.com/containous/traefik/pull/2811) by [sonus21](https://github.com/sonus21))
- Docs: regex+replacement hints for URL rewriting ([#2802](https://github.com/containous/traefik/pull/2802) by [djeeg](https://github.com/djeeg))
- Add documentation about entry points definition with CLI. ([#2798](https://github.com/containous/traefik/pull/2798) by [ldez](https://github.com/ldez))

## [v1.5.1](https://github.com/containous/traefik/tree/v1.5.1) (2018-01-29)
[All Commits](https://github.com/containous/traefik/compare/v1.5.0...v1.5.1)
@@ -64,7 +64,7 @@ Once your environment is set up and the Træfik repository cloned you can build
    cd ~/go/src/github.com/containous/traefik

    # Get go-bindata. Please note, the ellipses are required
    go get github.com/jteeuwen/go-bindata/...
    go get github.com/containous/go-bindata/...

    # Start build

@@ -87,9 +87,11 @@ If you happen to update the provider templates (in `/templates`), you need to ru

[dep](https://github.com/golang/dep) is not required for building; however, it is necessary to modify dependencies (i.e., add, update, or remove third-party packages)

You need to use [dep](https://github.com/golang/dep) >= 0.4.1.

If you want to add a dependency, use `dep ensure -add` to have [dep](https://github.com/golang/dep) put it into the vendor folder and update the dep manifest/lock files (`Gopkg.toml` and `Gopkg.lock`, respectively).

A following `make prune-dep` run should be triggered to trim down the size of the vendor folder.
A following `make dep-prune` run should be triggered to trim down the size of the vendor folder.
The final result must be committed into VCS.

Here's a full example using dep to add a new dependency:
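The full example referenced above is not included in this excerpt; the following is an illustrative sketch of the workflow just described (the dependency `github.com/pkg/errors` is a placeholder, not something added by this change):

```bash
# Add the dependency to Gopkg.toml/Gopkg.lock and vendor/ (assumes dep >= 0.4.1)
dep ensure -add github.com/pkg/errors

# Trim the vendor folder, as described above
make dep-prune

# Commit the result into VCS
git add Gopkg.toml Gopkg.lock vendor/
git commit -m "Add github.com/pkg/errors to vendor"
```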
20  Gopkg.lock (generated)

@@ -210,9 +210,12 @@

[[projects]]
  name = "github.com/containous/flaeg"
  packages = ["."]
  revision = "60c87a513a955ca7225e1b1c772581cea8420cb4"
  version = "v1.0.1"
  packages = [
    ".",
    "parse"
  ]
  revision = "963366c29a7acc2d6e02f4f9bcf260d5a1cf4968"
  version = "v1.1.1"

[[projects]]
  branch = "master"

@@ -223,8 +226,8 @@

[[projects]]
  name = "github.com/containous/staert"
  packages = ["."]
  revision = "af517d5b70db9c4b0505e0144fcc62b054057d2a"
  version = "v2.0.0"
  revision = "68c67b32c3a986672d994d38127cd5c78d53eb26"
  version = "v2.1.0"

[[projects]]
  name = "github.com/containous/traefik-extra-service-fabric"

@@ -962,7 +965,8 @@

    "mock",
    "require"
  ]
  revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
  revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
  version = "v1.1.4"

[[projects]]
  branch = "master"

@@ -1027,7 +1031,7 @@

    "roundrobin",
    "utils"
  ]
  revision = "fd6f71c694e2ab8b584c50b98ab4825027feb315"
  revision = "af377749f48ff0ae9974b30ce12a816738b94558"
  source = "https://github.com/containous/oxy.git"

[[projects]]

@@ -1386,6 +1390,6 @@

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "bd1e7a1b07d95ff85c675468bbfc4bc7a91c39cf1feceeb58dfcdba9592180a5"
  inputs-digest = "7122deb7b2056ecfa444c5fb64437c1b0723afae528a331c4b032b841039383b"
  solver-name = "gps-cdcl"
  solver-version = 1

@@ -64,7 +64,7 @@ ignored = ["github.com/sirupsen/logrus"]

[[constraint]]
  name = "github.com/containous/staert"
  version = "2.0.0"
  version = "2.1.0"

[[constraint]]
  name = "github.com/containous/traefik-extra-service-fabric"

@@ -190,3 +190,8 @@ ignored = ["github.com/sirupsen/logrus"]
  # remove override on master
  name = "github.com/coreos/bbolt"
  revision = "32c383e75ce054674c53b5a07e55de85332aee14"

[prune]
  non-go = true
  go-tests = true
  unused-packages = true
6  Makefile

@@ -127,7 +127,11 @@ fmt:

pull-images:
	grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull

prune-dep:
dep-ensure:
	dep ensure -v
	./script/prune-dep.sh

dep-prune:
	./script/prune-dep.sh

help: ## this help

@@ -12,7 +12,7 @@

[](https://twitter.com/intent/follow?screen_name=traefikproxy)

Træfik (pronounced like [traffic](https://speak-ipa.bearbin.net/speak.cgi?speak=%CB%88tr%C3%A6f%C9%AAk)) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
Træfik (pronounced like _traffic_) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), and a lot more) to manage its configuration automatically and dynamically.

---
@@ -222,6 +222,24 @@ func (dc *DomainsCertificates) exists(domainToFind Domain) (*DomainsCertificate,
	return nil, false
}

func (dc *DomainsCertificates) toDomainsMap() map[string]*tls.Certificate {
	domainsCertificatesMap := make(map[string]*tls.Certificate)
	for _, domainCertificate := range dc.Certs {
		certKey := domainCertificate.Domains.Main
		if domainCertificate.Domains.SANs != nil {
			sort.Strings(domainCertificate.Domains.SANs)
			for _, dnsName := range domainCertificate.Domains.SANs {
				if dnsName != domainCertificate.Domains.Main {
					certKey += fmt.Sprintf(",%s", dnsName)
				}
			}
		}
		domainsCertificatesMap[certKey] = domainCertificate.tlsCert
	}
	return domainsCertificatesMap
}

// DomainsCertificate contains a certificate for multiple domains
type DomainsCertificate struct {
	Domains Domain
126  acme/acme.go

@@ -295,6 +295,7 @@ func (a *ACME) leadershipListener(elected bool) error {

// CreateLocalConfig creates a tls.config using local ACME configuration
func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkOnDemandDomain func(domain string) bool) error {
	defer a.runJobs()
	err := a.init()
	if err != nil {
		return err

@@ -333,7 +334,9 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkO

	a.client, err = a.buildACMEClient(account)
	if err != nil {
		return err
		log.Errorf(`Failed to build ACME client: %s
Let's Encrypt functionality will be limited until traefik is restarted.`, err)
		return nil
	}

	if needRegister {

@@ -374,7 +377,6 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkO

	a.retrieveCertificates()
	a.renewCertificates()
	a.runJobs()

	ticker := time.NewTicker(24 * time.Hour)
	safe.Go(func() {

@@ -389,7 +391,7 @@ func (a *ACME) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificat
	domain := types.CanonicalDomain(clientHello.ServerName)
	account := a.store.Get().(*Account)

	if providedCertificate := a.getProvidedCertificate([]string{domain}); providedCertificate != nil {
	if providedCertificate := a.getProvidedCertificate(domain); providedCertificate != nil {
		return providedCertificate, nil
	}

@@ -623,11 +625,6 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {

	domains = fun.Map(types.CanonicalDomain, domains).([]string)

	// Check provided certificates
	if a.getProvidedCertificate(domains) != nil {
		return
	}

	operation := func() error {
		if a.client == nil {
			return errors.New("ACME client still not built")

@@ -645,32 +642,34 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {
		return
	}
	account := a.store.Get().(*Account)
	var domain Domain
	if len(domains) > 1 {
		domain = Domain{Main: domains[0], SANs: domains[1:]}
	} else {
		domain = Domain{Main: domains[0]}
	}
	if _, exists := account.DomainsCertificate.exists(domain); exists {
		// domain already exists

	// Check provided certificates
	uncheckedDomains := a.getUncheckedDomains(domains, account)
	if len(uncheckedDomains) == 0 {
		return
	}
	certificate, err := a.getDomainsCertificates(domains)
	certificate, err := a.getDomainsCertificates(uncheckedDomains)
	if err != nil {
		log.Errorf("Error getting ACME certificates %+v : %v", domains, err)
		log.Errorf("Error getting ACME certificates %+v : %v", uncheckedDomains, err)
		return
	}
	log.Debugf("Got certificate for domains %+v", domains)
	log.Debugf("Got certificate for domains %+v", uncheckedDomains)
	transaction, object, err := a.store.Begin()

	if err != nil {
		log.Errorf("Error creating transaction %+v : %v", domains, err)
		log.Errorf("Error creating transaction %+v : %v", uncheckedDomains, err)
		return
	}
	var domain Domain
	if len(uncheckedDomains) > 1 {
		domain = Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]}
	} else {
		domain = Domain{Main: uncheckedDomains[0]}
	}
	account = object.(*Account)
	_, err = account.DomainsCertificate.addCertificateForDomains(certificate, domain)
	if err != nil {
		log.Errorf("Error adding ACME certificates %+v : %v", domains, err)
		log.Errorf("Error adding ACME certificates %+v : %v", uncheckedDomains, err)
		return
	}
	if err = transaction.Commit(account); err != nil {

@@ -682,36 +681,95 @@ func (a *ACME) LoadCertificateForDomains(domains []string) {

// Get provided certificate which check a domains list (Main and SANs)
// from static and dynamic provided certificates
func (a *ACME) getProvidedCertificate(domains []string) *tls.Certificate {
func (a *ACME) getProvidedCertificate(domains string) *tls.Certificate {
	log.Debugf("Looking for provided certificate to validate %s...", domains)
	cert := searchProvidedCertificateForDomains(domains, a.TLSConfig.NameToCertificate)
	if cert == nil && a.dynamicCerts != nil && a.dynamicCerts.Get() != nil {
		cert = searchProvidedCertificateForDomains(domains, a.dynamicCerts.Get().(*traefikTls.DomainsCertificates).Get().(map[string]*tls.Certificate))
	}
	log.Debugf("No provided certificate found for domains %s, get ACME certificate.", domains)
	if cert == nil {
		log.Debugf("No provided certificate found for domains %s, get ACME certificate.", domains)
	}
	return cert
}

func searchProvidedCertificateForDomains(domains []string, certs map[string]*tls.Certificate) *tls.Certificate {
func searchProvidedCertificateForDomains(domain string, certs map[string]*tls.Certificate) *tls.Certificate {
	// Use regex to test for provided certs that might have been added into TLSConfig
	providedCertMatch := false
	for k := range certs {
		selector := "^" + strings.Replace(k, "*.", "[^\\.]*\\.?", -1) + "$"
		for _, domainToCheck := range domains {
			providedCertMatch, _ = regexp.MatchString(selector, domainToCheck)
			if !providedCertMatch {
	for certDomains := range certs {
		domainCheck := false
		for _, certDomain := range strings.Split(certDomains, ",") {
			selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.?", -1) + "$"
			domainCheck, _ = regexp.MatchString(selector, domain)
			if domainCheck {
				break
			}
		}
		if providedCertMatch {
			log.Debugf("Got provided certificate for domains %s", domains)
			return certs[k]

		if domainCheck {
			log.Debugf("Domain %q checked by provided certificate %q", domain, certDomains)
			return certs[certDomains]
		}
	}
	return nil
}

// Get provided certificate which check a domains list (Main and SANs)
// from static and dynamic provided certificates
func (a *ACME) getUncheckedDomains(domains []string, account *Account) []string {
	log.Debugf("Looking for provided certificate to validate %s...", domains)
	allCerts := make(map[string]*tls.Certificate)

	// Get static certificates
	for domains, certificate := range a.TLSConfig.NameToCertificate {
		allCerts[domains] = certificate
	}

	// Get dynamic certificates
	if a.dynamicCerts != nil && a.dynamicCerts.Get() != nil {
		for domains, certificate := range a.dynamicCerts.Get().(*traefikTls.DomainsCertificates).Get().(map[string]*tls.Certificate) {
			allCerts[domains] = certificate
		}
	}

	// Get ACME certificates
	if account != nil {
		for domains, certificate := range account.DomainsCertificate.toDomainsMap() {
			allCerts[domains] = certificate
		}
	}

	return searchUncheckedDomains(domains, allCerts)
}

func searchUncheckedDomains(domains []string, certs map[string]*tls.Certificate) []string {
	uncheckedDomains := []string{}
	for _, domainToCheck := range domains {
		domainCheck := false
		for certDomains := range certs {
			domainCheck = false
			for _, certDomain := range strings.Split(certDomains, ",") {
				// Use regex to test for provided certs that might have been added into TLSConfig
				selector := "^" + strings.Replace(certDomain, "*.", "[^\\.]*\\.?", -1) + "$"
				domainCheck, _ = regexp.MatchString(selector, domainToCheck)
				if domainCheck {
					break
				}
			}
			if domainCheck {
				break
			}
		}
		if !domainCheck {
			uncheckedDomains = append(uncheckedDomains, domainToCheck)
		}
	}
	if len(uncheckedDomains) == 0 {
		log.Debugf("No ACME certificate to generate for domains %q.", domains)
	} else {
		log.Debugf("Domains %q need ACME certificates generation for domains %q.", domains, strings.Join(uncheckedDomains, ","))
	}
	return uncheckedDomains
}

func (a *ACME) getDomainsCertificates(domains []string) (*Certificate, error) {
	domains = fun.Map(types.CanonicalDomain, domains).([]string)
	log.Debugf("Loading ACME certificates %s...", domains)

@@ -281,7 +281,7 @@ cijFkALeQp/qyeXdFld2v9gUN3eCgljgcl0QweRoIc=---`)
	}
}

func TestAcme_getProvidedCertificate(t *testing.T) {
func TestAcme_getUncheckedCertificates(t *testing.T) {
	mm := make(map[string]*tls.Certificate)
	mm["*.containo.us"] = &tls.Certificate{}
	mm["traefik.acme.io"] = &tls.Certificate{}

@@ -289,9 +289,36 @@ func TestAcme_getProvidedCertificate(t *testing.T) {
	a := ACME{TLSConfig: &tls.Config{NameToCertificate: mm}}

	domains := []string{"traefik.containo.us", "trae.containo.us"}
	certificate := a.getProvidedCertificate(domains)
	assert.NotNil(t, certificate)
	uncheckedDomains := a.getUncheckedDomains(domains, nil)
	assert.Empty(t, uncheckedDomains)
	domains = []string{"traefik.acme.io", "trae.acme.io"}
	certificate = a.getProvidedCertificate(domains)
	uncheckedDomains = a.getUncheckedDomains(domains, nil)
	assert.Len(t, uncheckedDomains, 1)
	domainsCertificates := DomainsCertificates{Certs: []*DomainsCertificate{
		{
			tlsCert: &tls.Certificate{},
			Domains: Domain{
				Main: "*.acme.wtf",
				SANs: []string{"trae.acme.io"},
			},
		},
	}}
	account := Account{DomainsCertificate: domainsCertificates}
	uncheckedDomains = a.getUncheckedDomains(domains, &account)
	assert.Empty(t, uncheckedDomains)
}

func TestAcme_getProvidedCertificate(t *testing.T) {
	mm := make(map[string]*tls.Certificate)
	mm["*.containo.us"] = &tls.Certificate{}
	mm["traefik.acme.io"] = &tls.Certificate{}

	a := ACME{TLSConfig: &tls.Config{NameToCertificate: mm}}

	domain := "traefik.containo.us"
	certificate := a.getProvidedCertificate(domain)
	assert.NotNil(t, certificate)
	domain = "trae.acme.io"
	certificate = a.getProvidedCertificate(domain)
	assert.Nil(t, certificate)
}
@@ -4,22 +4,20 @@ RUN apk --update upgrade \
    && apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar \
    && rm -rf /var/cache/apk/*

RUN go get github.com/jteeuwen/go-bindata/... \
RUN go get github.com/containous/go-bindata/... \
    && go get github.com/golang/lint/golint \
    && go get github.com/kisielk/errcheck \
    && go get github.com/client9/misspell/cmd/misspell

# Which docker version to test on
ARG DOCKER_VERSION=17.03.2
ARG DEP_VERSION=0.3.2
ARG DEP_VERSION=0.4.1

# Download dep binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \
    && curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 \
    && chmod +x /usr/local/bin/dep

# Download docker
RUN mkdir -p /usr/local/bin \
    && curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz \
@@ -287,6 +287,9 @@ func NewTraefikConfiguration() *TraefikConfiguration {
			HealthCheck: &configuration.HealthCheckConfig{
				Interval: flaeg.Duration(configuration.DefaultHealthCheckInterval),
			},
			LifeCycle: &configuration.LifeCycle{
				GraceTimeOut: flaeg.Duration(configuration.DefaultGraceTimeout),
			},
			CheckNewVersion: true,
		},
		ConfigFile: "",
@@ -29,11 +29,6 @@ func runHealthCheck(traefikConfiguration *TraefikConfiguration) func() error {
	return func() error {
		traefikConfiguration.GlobalConfiguration.SetEffectiveConfiguration(traefikConfiguration.ConfigFile)

		if traefikConfiguration.Ping == nil {
			fmt.Println("Please enable `ping` to use healtcheck.")
			os.Exit(1)
		}

		resp, errPing := healthCheck(traefikConfiguration.GlobalConfiguration)
		if errPing != nil {
			fmt.Printf("Error calling healthcheck: %s\n", errPing)

@@ -50,9 +45,13 @@ func runHealthCheck(traefikConfiguration *TraefikConfiguration) func() error {
}

func healthCheck(globalConfiguration configuration.GlobalConfiguration) (*http.Response, error) {
	if globalConfiguration.Ping == nil {
		return nil, errors.New("please enable `ping` to use health check")
	}

	pingEntryPoint, ok := globalConfiguration.EntryPoints[globalConfiguration.Ping.EntryPoint]
	if !ok {
		return nil, errors.New("missing ping entrypoint")
		return nil, errors.New("missing `ping` entrypoint")
	}

	client := &http.Client{Timeout: 5 * time.Second}
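The health check above refuses to run unless `ping` is configured; the hunks in this change only show the Go side, so the following TOML is an illustrative sketch (entry point name and port are placeholders, not part of the diff) of a configuration that lets `traefik healthcheck` succeed:

```toml
# Minimal sketch: expose /ping on a dedicated entry point
[entryPoints]
  [entryPoints.ping]
    address = ":8082"

[ping]
  entryPoint = "ping"
```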
@@ -28,6 +28,7 @@ import (
	"github.com/containous/traefik/types"
	"github.com/containous/traefik/version"
	"github.com/coreos/go-systemd/daemon"
	"github.com/ogier/pflag"
)

func main() {

@@ -75,6 +76,9 @@ Complete documentation is available at https://traefik.io`,
	}

	if _, err := f.Parse(usedCmd); err != nil {
		if err == pflag.ErrHelp {
			os.Exit(0)
		}
		fmtlog.Printf("Error parsing command: %s\n", err)
		os.Exit(-1)
	}
@@ -236,7 +236,7 @@ The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portio

By default, routes will be sorted (in descending order) using rules length (to avoid path overlap):
`PathPrefix:/12345` will be matched before `PathPrefix:/1234` that will be matched before `PathPrefix:/1`.

You can customize priority by frontend:
You can customize priority by frontend. The priority value is added to the rule length during sorting:

```toml
[frontends]

@@ -254,7 +254,7 @@ You can customize priority by frontend:
    rule = "PathPrefix:/toto"
```

Here, `frontend1` will be matched before `frontend2` (`10 > 5`).
Here, `frontend1` will be matched before `frontend2` (`(3 + 10 == 13) > (4 + 5 == 9)`).

#### Custom headers

@@ -569,6 +569,11 @@ Each command is described at the beginning of the help section:

```bash
traefik --help

# or

docker run traefik[:version] --help
# ex: docker run traefik:1.5 --help
```

### Command: bug
@@ -113,7 +113,7 @@ entryPoint = "https"
# Required
#
entryPoint = "http"


# Use a DNS-01 acme challenge rather than TLS-SNI-01 challenge
#
# Optional

@@ -135,6 +135,7 @@ entryPoint = "https"
#
# delayBeforeCheck = 0
```

!!! note
    Even if `TLS-SNI-01` challenge is [disabled](https://community.letsencrypt.org/t/2018-01-11-update-regarding-acme-tls-sni-and-shared-hosting-infrastructure/50188) for the moment, it stays the _by default_ ACME Challenge in Træfik.
    If `TLS-SNI-01` challenge is not re-enabled in the future, it we will be removed from Træfik.

@@ -144,6 +145,19 @@ entryPoint = "https"
If `HTTP-01` challenge is used, `acme.httpChallenge.entryPoint` has to be defined and reachable by Let's Encrypt through the port 80.
These are Let's Encrypt limitations as described on the [community forum](https://community.letsencrypt.org/t/support-for-ports-other-than-80-and-443/3419/72).

### Let's Encrypt downtime

Let's Encrypt functionality will be limited until Træfik is restarted.

If Let's Encrypt is not reachable, these certificates will be used :

- ACME certificates already generated before downtime
- Expired ACME certificates
- Provided certificates

!!! note
    Default Træfik certificate will be used instead of ACME certificates for new (sub)domains (which need Let's Encrypt challenge).

### `storage`

```toml

@@ -153,9 +167,27 @@ storage = "acme.json"
# ...
```

File or key used for certificates storage.
The `storage` option sets where are stored your ACME certificates.

**WARNING:** If you use Træfik in Docker, you have 2 options:
There are two kind of `storage` :

- a JSON file,
- a KV store entry.

!!! danger "DEPRECATED"
    `storage` replaces `storageFile` which is deprecated.

!!! note
    During Træfik configuration migration from a configuration file to a KV store (thanks to `storeconfig` subcommand as described [here](/user-guide/kv-config/#store-configuration-in-key-value-store)), if ACME certificates have to be migrated too, use both `storageFile` and `storage`.

    - `storageFile` will contain the path to the `acme.json` file to migrate.
    - `storage` will contain the key where the certificates will be stored.

#### Store data in a file

ACME certificates can be stored in a JSON file which with the `600` right mode.

There are two ways to store ACME certificates in a file from Docker:

- create a file on your host and mount it as a volume:
```toml

@@ -164,7 +196,6 @@ storage = "acme.json"
```bash
docker run -v "/my/host/acme.json:acme.json" traefik
```

- mount the folder containing the file as a volume
```toml
storage = "/etc/traefik/acme/acme.json"

@@ -173,14 +204,24 @@ storage = "/etc/traefik/acme/acme.json"
docker run -v "/my/host/acme:/etc/traefik/acme" traefik
```

!!! note
    `storage` replaces `storageFile` which is deprecated.
!!! warning
    This file cannot be shared per many instances of Træfik at the same time.
    If you have to use Træfik cluster mode, please use [a KV Store entry](/configuration/acme/#storage-kv-entry).

#### Store data in a KV store entry

ACME certificates can be stored in a KV Store entry.

```toml
storage = "traefik/acme/account"
```

**This kind of storage is mandatory in cluster mode.**

Because KV stores (like Consul) have limited entries size, the certificates list is compressed before to be set in a KV store entry.

!!! note
    During Træfik configuration migration from a configuration file to a KV store (thanks to `storeconfig` subcommand as described [here](/user-guide/kv-config/#store-configuration-in-key-value-store)), if ACME certificates have to be migrated too, use both `storageFile` and `storage`.

    - `storageFile` will contain the path to the `acme.json` file to migrate.
    - `storage` will contain the key where the certificates will be stored.
It's possible to store up to approximately 100 ACME certificates in Consul.
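As a concrete illustration of the migration note above (values are placeholders, not part of this change), both keys can be set while running `traefik storeconfig`:

```toml
[acme]
# local file whose certificates will be migrated
storageFile = "acme.json"
# KV store key where the certificates will end up
storage = "traefik/acme/account"
```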
### `acme.httpChallenge`

@@ -202,6 +243,8 @@ entryPoint = "https"
Specify the entryPoint to use during the challenges.

```toml
defaultEntryPoints = ["http", "http"]

[entryPoints]
  [entryPoints.http]
    address = ":80"

@@ -234,7 +277,7 @@ Use `DNS-01` challenge to generate/renew ACME certificates.
# ...
```

#### `provider`

Select the provider that matches the DNS domain that will host the challenge TXT record, and provide environment variables to enable setting it:

@@ -276,7 +319,7 @@ Useful if internal networks block external DNS queries.

### `onDemand` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
    This option is deprecated.

```toml

@@ -288,11 +331,11 @@ onDemand = true

Enable on demand certificate.

This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate.
This will request a certificate from Let's Encrypt during the first TLS handshake for a host name that does not yet have a certificate.

!!! warning
    TLS handshakes will be slow when requesting a hostname certificate for the first time, this can lead to DoS attacks.

    TLS handshakes will be slow when requesting a host name certificate for the first time, this can lead to DoS attacks.

!!! warning
    Take note that Let's Encrypt have [rate limiting](https://letsencrypt.org/docs/rate-limits).

@@ -305,7 +348,7 @@ onHostRule = true
# ...
```

Enable certificate generation on frontends Host rules.
Enable certificate generation on frontends `Host` rules (for frontends wired on the `acme.entryPoint`).

This will request a certificate from Let's Encrypt for each frontend with a Host rule.

@@ -353,12 +396,12 @@ Each domain & SANs will lead to a certificate request.

### `dnsProvider` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
    This option is deprecated.
    Please refer to [DNS challenge provider section](/configuration/acme/#provider)

### `delayDontCheckDNS` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
    This option is deprecated.
    Please refer to [DNS challenge delayBeforeCheck section](/configuration/acme/#delaybeforecheck)
@@ -1,5 +1,7 @@
# API Definition

## Configuration

```toml
# API definition
[api]

@@ -28,6 +30,8 @@
debug = true
```

For more customization, see [entry points](/configuration/entrypoints/) documentation and [examples](/user-guide/examples/#ping-health-check).

## Web UI


@@ -42,7 +46,7 @@
| `/health` | `GET` | json health metrics |
| `/api` | `GET` | Configuration for all providers |
| `/api/providers` | `GET` | Providers |
| `/api/providers/{provider}` | `GET`, `PUT` | Get or update provider |
| `/api/providers/{provider}` | `GET`, `PUT` | Get or update provider (1) |
| `/api/providers/{provider}/backends` | `GET` | List backends |
| `/api/providers/{provider}/backends/{backend}` | `GET` | Get backend |
| `/api/providers/{provider}/backends/{backend}/servers` | `GET` | List servers in backend |

@@ -52,6 +56,8 @@
| `/api/providers/{provider}/frontends/{frontend}/routes` | `GET` | List routes in a frontend |
| `/api/providers/{provider}/frontends/{frontend}/routes/{route}` | `GET` | Get a route in a frontend |

<1> See [Rest](/configuration/backends/rest/#api) for more information.

!!! warning
    For compatibility reason, when you activate the rest provider, you can use `web` or `rest` as `provider` value.
    But be careful, in the configuration for all providers the key is still `web`.

@@ -185,6 +191,7 @@ curl -s "http://localhost:8080/health" | jq .
## Metrics

You can enable Traefik to export internal metrics to different monitoring systems.

```toml
[api]
# ...

@@ -150,7 +150,7 @@ The following security annotations are applicable on the Ingress object:
| `ingress.kubernetes.io/ssl-host:HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
| `ingress.kubernetes.io/ssl-proxy-headers:EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`). Format: <code>HEADER:value||HEADER2:value2</code> |
| `ingress.kubernetes.io/hsts-max-age:315360000` | Sets the max-age of the HSTS header. |
| `ngress.kubernetes.io/hsts-include-subdomains:true` | Adds the IncludeSubdomains section of the STS header. |
| `ingress.kubernetes.io/hsts-include-subdomains:true` | Adds the IncludeSubdomains section of the STS header. |
| `ingress.kubernetes.io/hsts-preload:true` | Adds the preload flag to the HSTS header. |
| `ingress.kubernetes.io/force-hsts:false` | Adds the STS header to non-SSL requests. |
| `ingress.kubernetes.io/frame-deny:false` | Adds the `X-Frame-Options` header with the value of `DENY`. |
@@ -36,7 +36,6 @@ address = ":8080"
#
readOnly = true


# Set the root path for webui and API
#
# Deprecated

@@ -55,13 +54,13 @@ readOnly = true
### Authentication

!!! note
    The `/ping` path of the api is excluded from authentication (since 1.4).
    The `/ping` path of the API is excluded from authentication (since 1.4).

#### Basic Authentication

Passwords can be encoded in MD5, SHA1 and BCrypt: you can use `htpasswd` to generate those ones.

Users can be specified directly in the toml file, or indirectly by referencing an external file;
Users can be specified directly in the TOML file, or indirectly by referencing an external file;
if both are provided, the two are merged, with external file contents having precedence.

```toml

@@ -80,7 +79,7 @@ usersFile = "/path/to/.htpasswd"

You can use `htdigest` to generate those ones.

Users can be specified directly in the toml file, or indirectly by referencing an external file;
Users can be specified directly in the TOML file, or indirectly by referencing an external file;
if both are provided, the two are merged, with external file contents having precedence

```toml

@@ -98,7 +97,7 @@ usersFile = "/path/to/.htdigest"

## Metrics

You can enable Traefik to export internal metrics to different monitoring systems.
You can enable Træfik to export internal metrics to different monitoring systems.

### Prometheus

@@ -114,7 +113,7 @@ You can enable Traefik to export internal metrics to different monitoring system
# Optional
# Default: [0.1, 0.3, 1.2, 5]
buckets=[0.1,0.3,1.2,5.0]


# ...
```

@@ -221,7 +220,7 @@ recentErrors = 10
|-----------------------------------------------------------------|:-------------:|----------------------------------------------------------------------------------------------------|
| `/` | `GET` | Provides a simple HTML frontend of Træfik |
| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Træfik process liveness. Return a code `200` with the content: `OK` |
| `/health` | `GET` | json health metrics |
| `/health` | `GET` | JSON health metrics |
| `/api` | `GET` | Configuration for all providers |
| `/api/providers` | `GET` | Providers |
| `/api/providers/{provider}` | `GET`, `PUT` | Get or update provider |

@@ -244,7 +243,7 @@ curl -sv "http://localhost:8080/ping"
```
```shell
* Trying ::1...
* Connected to localhost (::1) port 8080 (#0)
* Connected to localhost (::1) port 8080 (\#0)
> GET /ping HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.43.0

@@ -255,7 +254,7 @@ curl -sv "http://localhost:8080/ping"
< Content-Length: 2
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
* Connection \#0 to host localhost left intact
OK
```

@@ -309,7 +308,7 @@ curl -s "http://localhost:8080/health" | jq .
    "status": "Internal Server Error",
    // request HTTP method
    "method": "GET",
    // request hostname
    // request host name
    "host": "localhost",
    // request path
    "path": "/path",
@@ -385,23 +384,28 @@ curl -s "http://localhost:8080/api" | jq .
}
```

## Path
### Deprecation compatibility

As web is deprecated, you can handle the `Path` option like this
#### Path

As the web provider is deprecated, you can handle the `Path` option like this:

```toml
[entrypoints.http]
address=":80"
defaultEntryPoints = ["http"]

[entrypoints.dashboard]
address=":8080"
[entryPoints]
  [entryPoints.http]
    address = ":80"

[entrypoints.api]
address=":8081"
  [entryPoints.dashboard]
    address = ":8080"

#Activate API and Dashboard
  [entryPoints.api]
    address = ":8081"

# Activate API and Dashboard
[api]
entrypoint="api"
entryPoint = "api"

[file]
[backends]

@@ -411,8 +415,67 @@ entrypoint="api"

[frontends]
  [frontends.frontend1]
    entrypoints=["dashboard"]
    entryPoints = ["dashboard"]
    backend = "backend1"
    [frontends.frontend1.routes.test_1]
      rule = "PathPrefixStrip:/yourprefix;PathPrefix:/yourprefix"
```

#### Address

As the web provider is deprecated, you can handle the `Address` option like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
  [entryPoints.http]
    address = ":80"

  [entryPoints.ping]
    address = ":8082"

  [entryPoints.api]
    address = ":8083"

[ping]
entryPoint = "ping"

[api]
entryPoint = "api"
```

In the above example, you would access a regular path, administration panel, and health-check as follows:

* Regular path: `http://hostname:80/foo`
* Admin Panel: `http://hostname:8083/`
* Ping URL: `http://hostname:8082/ping`

In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
Otherwise, you are likely to expose _all_ services via that entry point.

#### Authentication

As the web provider is deprecated, you can handle the `auth` option like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
  [entryPoints.http]
    address = ":80"

  [entryPoints.api]
    address=":8080"
    [entryPoints.api.auth]
      [entryPoints.api.auth.basic]
        users = [
          "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
          "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
        ]

[api]
entrypoint="api"
```

For more information, see [entry points](/configuration/entrypoints/) .
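The `$apr1$...` values in the example above are htpasswd-style MD5 hashes. As an illustrative aside (user names and password are placeholders, not part of this change), such entries can be generated with the `htpasswd` tool:

```bash
# Print an MD5 (APR1) entry suitable for the `users` list
htpasswd -nb someuser somepassword

# Or generate a BCrypt entry instead
htpasswd -nbB someuser somepassword
```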
@@ -2,6 +2,8 @@

## Reference

### TOML

```toml
[entryPoints]
  [entryPoints.http]

@@ -11,7 +13,10 @@

  [entryPoints.http.tls]
    minVersion = "VersionTLS12"
    cipherSuites = ["TLS_RSA_WITH_AES_256_GCM_SHA384"]
    cipherSuites = [
      "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
      "TLS_RSA_WITH_AES_256_GCM_SHA384"
    ]
    [[entryPoints.http.tls.certificates]]
      certFile = "path/to/my.cert"
      keyFile = "path/to/my.key"

@@ -27,7 +32,6 @@
    entryPoint = "https"
    regex = "^http://localhost/(.*)"
    replacement = "http://mydomain/$1"
    permanent = true

  [entryPoints.http.auth]
    headerField = "X-WebAuth-User"

@@ -64,6 +68,53 @@
# ...
```

### CLI

For more information about the CLI, see the documentation about [Traefik command](/basics/#traefik).

```shell
--entryPoints='Name:http Address::80'
--entryPoints='Name:https Address::443 TLS'
```

!!! note
    Whitespace is used as option separator and `,` is used as value separator for the list.
    The names of the options are case-insensitive.

In compose file the entrypoint syntax is different:

```yaml
traefik:
  image: traefik
  command:
    - --defaultentrypoints=powpow
    - "--entryPoints=Name:powpow Address::42 Compress:true"
```
or
```yaml
traefik:
  image: traefik
  command: --defaultentrypoints=powpow --entryPoints='Name:powpow Address::42 Compress:true'
```

#### All available options:

```ini
Name:foo
Address::80
TLS:goo,gii
TLS
CA:car
CA.Optional:true
Redirect.EntryPoint:https
Redirect.Regex:http://localhost/(.*)
Redirect.Replacement:http://mydomain/$1
Compress:true
WhiteListSourceRange:10.42.0.0/16,152.89.1.33/32,afed:be44::/16
ProxyProtocol.TrustedIPs:192.168.0.1
ProxyProtocol.Insecure:tue
ForwardedHeaders.TrustedIPs:10.0.0.3/24,20.0.0.3/24
```

## Basic

@@ -118,7 +169,11 @@ To redirect an entrypoint rewriting the URL.
```

!!! note
    Please note that `regex` and `replacement` do not have to be set in the `redirect` structure if an entrypoint is defined for the redirection (they will not be used in this case).
    Please note that `regex` and `replacement` do not have to be set in the `redirect` structure if an `entrypoint` is defined for the redirection (they will not be used in this case).

    Care should be taken when defining replacement expand variables: `$1x` is equivalent to `${1x}`, not `${1}x` (see [Regexp.Expand](https://golang.org/pkg/regexp/#Regexp.Expand)), so use `${1}` syntax.

    Regular expressions and replacements can be tested using online tools such as [Go Playground](https://play.golang.org/p/mWU9p-wk2ru) or the [Regex101](https://regex101.com/r/58sIgx/2).
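To make the `${1}` rule above concrete, here is a small illustrative Go sketch (not part of this change) showing how `$1x` and `${1}x` expand differently:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^http://localhost/(.*)`)

	// "$1x" is parsed as the capture group named "1x", which does not exist,
	// so it expands to an empty string.
	fmt.Println(re.ReplaceAllString("http://localhost/foo", "http://mydomain/$1x"))
	// Output: http://mydomain/

	// "${1}x" unambiguously means "group 1, then the literal x".
	fmt.Println(re.ReplaceAllString("http://localhost/foo", "http://mydomain/${1}x"))
	// Output: http://mydomain/foox
}
```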
## TLS

@@ -175,17 +230,16 @@ In the example below both `snitest.com` and `snitest.org` will require client ce
```

!!! note
    The deprecated argument `ClientCAFiles` allows adding Client CA files which are mandatory.
    If this parameter exists, the new ones are not checked.

## Authentication

### Basic Authentication

Passwords can be encoded in MD5, SHA1 and BCrypt: you can use `htpasswd` to generate those ones.
Passwords can be encoded in MD5, SHA1 and BCrypt: you can use `htpasswd` to generate them.

Users can be specified directly in the toml file, or indirectly by referencing an external file;
Users can be specified directly in the TOML file, or indirectly by referencing an external file;
if both are provided, the two are merged, with external file contents having precedence.

```toml

@@ -200,9 +254,9 @@ Users can be specified directly in the toml file, or indirectly by referencing a

### Digest Authentication

You can use `htdigest` to generate those ones.
You can use `htdigest` to generate them.

Users can be specified directly in the toml file, or indirectly by referencing an external file;
Users can be specified directly in the TOML file, or indirectly by referencing an external file;
if both are provided, the two are merged, with external file contents having precedence

```toml

@@ -220,7 +274,7 @@ Users can be specified directly in the toml file, or indirectly by referencing a
This configuration will first forward the request to `http://authserver.com/auth`.

If the response code is 2XX, access is granted and the original request is performed.
Otherwise, the response from the auth server is returned.
Otherwise, the response from the authentication server is returned.

```toml
[entryPoints]

@@ -257,7 +311,10 @@ To specify an https entry point with a minimum TLS version, and specifying an ar
  address = ":443"
  [entryPoints.https.tls]
    minVersion = "VersionTLS12"
    cipherSuites = ["TLS_RSA_WITH_AES_256_GCM_SHA384"]
    cipherSuites = [
      "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
      "TLS_RSA_WITH_AES_256_GCM_SHA384"
    ]
    [[entryPoints.https.tls.certificates]]
      certFile = "integration/fixtures/https/snitest.com.cert"
      keyFile = "integration/fixtures/https/snitest.com.key"
@@ -1,5 +1,7 @@
# Ping Definition

## Configuration

```toml
# Ping definition
[ping]

@@ -19,7 +21,7 @@
!!! warning
    Even if you have authentication configured on entry point, the `/ping` path of the api is excluded from authentication.

### Example
## Example

```shell
curl -sv "http://localhost:8080/ping"
@@ -10,7 +10,7 @@
|
||||
[](https://twitter.com/intent/follow?screen_name=traefikproxy)
|
||||
|
||||
|
||||
Træfik (pronounced like [traffic](https://speak-ipa.bearbin.net/speak.cgi?speak=%CB%88tr%C3%A6f%C9%AAk)) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
|
||||
Træfik (pronounced like _traffic_) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
|
||||
It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), and a lot more) to manage its configuration automatically and dynamically.
|
||||
|
||||
## Overview
|
||||
|
||||
@@ -35,14 +35,14 @@ TL;DR:
|
||||
|
||||
```shell
|
||||
$ traefik \
|
||||
--entrypoints=Name:http Address::80 Redirect.EntryPoint:https \
|
||||
--entrypoints=Name:https Address::443 TLS \
|
||||
--entrypoints='Name:http Address::80 Redirect.EntryPoint:https' \
|
||||
--entrypoints='Name:https Address::443 TLS' \
|
||||
--defaultentrypoints=http,https
|
||||
```
|
||||
|
||||
To listen to different ports, we need to create an entry point for each.
|
||||
|
||||
The CLI syntax is `--entrypoints=Name:a_name Address:an_ip_or_empty:a_port options`.
|
||||
The CLI syntax is `--entrypoints='Name:a_name Address:an_ip_or_empty:a_port options'`.
|
||||
If you want to redirect traffic from one entry point to another, use the `Redirect.EntryPoint:entrypoint_name` option.
|
||||
|
||||
By default, we don't want to configure all our services to listen on http and https, so we add a default entry point configuration: `--defaultentrypoints=http,https`.
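The same setup can also be written in the TOML file; a rough equivalent of the CLI flags above (a sketch assuming the standard ports `80` and `443`):

```toml
defaultEntryPoints = ["http", "https"]

[entryPoints]
  [entryPoints.http]
  address = ":80"
    [entryPoints.http.redirect]
    entryPoint = "https"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]
```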
|
||||
|
||||
@@ -23,3 +23,11 @@ A Træfik cluster is based on a manager/worker model.
|
||||
|
||||
When starting, Træfik will elect a manager.
|
||||
If this instance fails, another manager will be automatically elected.
|
||||
|
||||
## Træfik cluster and Let's Encrypt
|
||||
|
||||
**In cluster mode, ACME certificates have to be stored in [a KV Store entry](/configuration/acme/#storage-kv-entry).**
|
||||
|
||||
Thanks to the Træfik cluster mode algorithm (based on [the Raft Consensus Algorithm](https://raft.github.io/)), only one instance will contact Let's Encrypt to solve the challenges.
|
||||
|
||||
The other instances will get the ACME certificates from the KV Store entry.
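As an illustration, a sketch of an `[acme]` section storing certificates in a KV store entry instead of a local file, assuming Consul as the store (email and endpoint are placeholders):

```toml
[acme]
email = "admin@example.com"        # placeholder address
storage = "traefik/acme/account"   # KV store entry, not a local acme.json file
entryPoint = "https"
onHostRule = true
  [acme.httpChallenge]
  entryPoint = "http"

# The same Consul cluster holds the shared cluster configuration
[consul]
  endpoint = "127.0.0.1:8500"
  watch = true
  prefix = "traefik"
```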
|
||||
@@ -69,7 +69,7 @@ networks:
|
||||
```
|
||||
|
||||
As you can see, we're mounting the `traefik.toml` file as well as the (empty) `acme.json` file in the container.
|
||||
Also, we're mounting the `/var/run/docker.sock` Docker socket in the container as well, so Træfik can listen to Docker events and reconfigure it's own internal configuration when containers are created (or shut down).
|
||||
Also, we're mounting the `/var/run/docker.sock` Docker socket in the container as well, so Træfik can listen to Docker events and reconfigure its own internal configuration when containers are created (or shut down).
|
||||
Also, we're making sure the container is automatically restarted by the Docker engine in case of problems (or: if the server is rebooted).
|
||||
We're publishing the default HTTP ports `80` and `443` on the host, and making sure the container is placed within the `web` network we've created earlier on.
|
||||
Finally, we're giving this container a static name called `traefik`.
|
||||
@@ -110,7 +110,7 @@ entryPoint = "http"
|
||||
|
||||
This is the minimum configuration required to do the following:
|
||||
|
||||
- Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messagse
|
||||
- Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messages
|
||||
- Check for new versions of Træfik periodically
|
||||
- Create two entry points, namely an `HTTP` endpoint on port `80`, and an `HTTPS` endpoint on port `443` where all incoming traffic on port `80` will immediately get redirected to `HTTPS`.
|
||||
- Enable the Docker configuration backend and listen for container events on the Docker unix socket we've mounted earlier. However, **new containers will not be exposed by Træfik by default, we'll get into this in a bit!**
|
||||
@@ -199,7 +199,7 @@ Since the `traefik` container we've created and started earlier is also attached
|
||||
As mentioned earlier, we don't want containers exposed automatically by Træfik.
|
||||
|
||||
The reason behind this is simple: we want to have control over this process ourselves.
|
||||
Thanks to Docker labels, we can tell Træfik how to create it's internal routing configuration.
|
||||
Thanks to Docker labels, we can tell Træfik how to create its internal routing configuration.
|
||||
|
||||
Let's take a look at the labels themselves for the `app` service, which is an HTTP web service listening on port 9000:
|
||||
|
||||
@@ -222,7 +222,7 @@ We use both `container labels` and `service labels`.
|
||||
First, we specify the `backend` name which corresponds to the actual service we're routing **to**.
|
||||
|
||||
We also tell Træfik to use the `web` network to route HTTP traffic to this container.
|
||||
With the `traefik.enable` label, we tell Træfik to include this container in it's internal configuration.
|
||||
With the `traefik.enable` label, we tell Træfik to include this container in its internal configuration.
|
||||
|
||||
With the `frontend.rule` label, we tell Træfik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`.
|
||||
Essentially, this is the actual rule used for Layer-7 load balancing.
|
||||
|
||||
@@ -91,7 +91,7 @@ entryPoint = "https"
|
||||
|
||||
This configuration allows generating Let's Encrypt certificates (thanks to the `HTTP-01` challenge) for the four domains `local[1-4].com` with the described SANs.
|
||||
|
||||
Traefik generates these certificates when it starts and it needs to be restart if new domains are added.
|
||||
Træfik generates these certificates when it starts and needs to be restarted if new domains are added.
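For reference, the `[[acme.domains]]` entries such a configuration relies on look roughly like this (the domain names are placeholders):

```toml
[[acme.domains]]
  main = "local1.com"
  sans = ["test1.local1.com", "test2.local1.com"]

[[acme.domains]]
  main = "local2.com"
```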
|
||||
|
||||
### OnHostRule option (with HTTP challenge)
|
||||
|
||||
@@ -126,9 +126,9 @@ entryPoint = "https"
|
||||
|
||||
This configuration allows generating Let's Encrypt certificates (thanks to the `HTTP-01` challenge) for the four domains `local[1-4].com`.
|
||||
|
||||
Traefik generates these certificates when it starts.
|
||||
Træfik generates these certificates when it starts.
|
||||
|
||||
If a backend is added with a `onHost` rule, Traefik will automatically generate the Let's Encrypt certificate for the new domain.
|
||||
If a backend is added with a `onHost` rule, Træfik will automatically generate the Let's Encrypt certificate for the new domain (for frontends wired on the `acme.entryPoint`).
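A minimal sketch of an `[acme]` section with `onHostRule` enabled, assuming the HTTP challenge is answered on an `http` entry point (email and storage path are placeholders):

```toml
[acme]
email = "admin@example.com"   # placeholder address
storage = "acme.json"
entryPoint = "https"
onHostRule = true
  [acme.httpChallenge]
  entryPoint = "http"
```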
|
||||
|
||||
### OnDemand option (with HTTP challenge)
|
||||
|
||||
@@ -152,11 +152,10 @@ entryPoint = "https"
|
||||
|
||||
This configuration allows generating a Let's Encrypt certificate (thanks to the `HTTP-01` challenge) during the first HTTPS request on a new domain.
|
||||
|
||||
|
||||
!!! note
|
||||
This option simplifies the configuration but:
|
||||
|
||||
* TLS handshakes will be slow when requesting a hostname certificate for the first time, this can leads to DDoS attacks.
|
||||
* TLS handshakes will be slow when requesting a host name certificate for the first time; this can lead to DDoS attacks.
|
||||
* Let's Encrypt has rate limits: https://letsencrypt.org/docs/rate-limits
|
||||
|
||||
That's why it's better to use the `onHostRule` option if possible.
|
||||
@@ -191,7 +190,7 @@ entryPoint = "https"
|
||||
```
|
||||
|
||||
The DNS challenge needs environment variables in order to be executed.
|
||||
This variables have to be set on the machine/container which host Traefik.
|
||||
These variables have to be set on the machine/container that hosts Træfik.
|
||||
|
||||
These variables are described [in this section](/configuration/acme/#provider).
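As a sketch, such a DNS-challenge section might look like the following; the provider name is only an assumption (use your own), and its credentials are read from the environment variables mentioned above:

```toml
[acme]
email = "admin@example.com"   # placeholder address
storage = "acme.json"
entryPoint = "https"
  [acme.dnsChallenge]
  provider = "digitalocean"   # assumption: any supported DNS provider
  delayBeforeCheck = 0
```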
|
||||
|
||||
@@ -218,7 +217,7 @@ entryPoint = "https"
|
||||
entryPoint = "http"
|
||||
```
|
||||
|
||||
Traefik will only try to generate a Let's encrypt certificate (thanks to `HTTP-01` challenge) if the domain cannot be checked by the provided certificates.
|
||||
Træfik will only try to generate a Let's Encrypt certificate (thanks to the `HTTP-01` challenge) if the domain is not covered by the provided certificates.
|
||||
|
||||
### Cluster mode
|
||||
|
||||
@@ -292,14 +291,14 @@ The `consul` provider contains the configuration.
|
||||
rule = "Path:/test"
|
||||
```
|
||||
|
||||
## Enable Basic authentication in an entrypoint
|
||||
## Enable Basic authentication in an entry point
|
||||
|
||||
With two user/password pairs:
|
||||
|
||||
- `test`:`test`
|
||||
- `test2`:`test2`
|
||||
|
||||
Passwords are encoded in MD5: you can use htpasswd to generate those ones.
|
||||
Passwords are encoded in MD5: you can use `htpasswd` to generate them.
|
||||
|
||||
```toml
|
||||
defaultEntryPoints = ["http"]
|
||||
@@ -337,7 +336,7 @@ providersThrottleDuration = "5s"
|
||||
idleTimeout = "360s"
|
||||
```
|
||||
|
||||
## Securing Ping Health Check
|
||||
## Ping Health Check
|
||||
|
||||
The `/ping` health-check URL is enabled with the command-line option `--ping` or the config file option `[ping]`.
|
||||
Thus, if you have a regular path for `/foo` and an entrypoint on `:80`, you would access them as follows:
|
||||
@@ -346,40 +345,36 @@ Thus, if you have a regular path for `/foo` and an entrypoint on `:80`, you woul
|
||||
* Admin panel: `http://hostname:8080/`
|
||||
* Ping URL: `http://hostname:8080/ping`
|
||||
|
||||
However, for security reasons, you may want to be able to expose the `/ping` health-check URL to outside health-checkers, e.g. an Internet service or cloud load-balancer, _without_ exposing your admin panel's port.
|
||||
However, for security reasons, you may want to be able to expose the `/ping` health-check URL to outside health-checkers, e.g. an Internet service or cloud load-balancer, _without_ exposing your administration panel's port.
|
||||
In many environments, the security staff may not _allow_ you to expose it.
|
||||
|
||||
You have two options:
|
||||
|
||||
* Enable `/ping` on a regular entrypoint
|
||||
* Enable `/ping` on a regular entry point
|
||||
* Enable `/ping` on a dedicated port
|
||||
|
||||
### Enable ping health check on a regular entrypoint
|
||||
### Enable ping health check on a regular entry point
|
||||
|
||||
To proxy `/ping` from a regular entrypoint to the admin one without exposing the panel, do the following:
|
||||
To proxy `/ping` from a regular entry point to the administration one without exposing the panel, do the following:
|
||||
|
||||
```toml
|
||||
[backends]
|
||||
[backends.traefik]
|
||||
[backends.traefik.servers.server1]
|
||||
url = "http://localhost:8080"
|
||||
weight = 10
|
||||
defaultEntryPoints = ["http"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
|
||||
[ping]
|
||||
entryPoint = "http"
|
||||
|
||||
[frontends]
|
||||
[frontends.traefikadmin]
|
||||
backend = "traefik"
|
||||
[frontends.traefikadmin.routes.ping]
|
||||
rule = "Path:/ping"
|
||||
```
|
||||
|
||||
The above creates a new backend called `traefik`, listening on `http://localhost:8080`, i.e. the local admin port.
|
||||
We only expose the admin panel via the `frontend` named `traefikadmin`, and only expose the `/ping` Path.
|
||||
Be careful with the `traefikadmin` frontend. If you do _not_ specify a `Path:` rule, you would expose the entire dashboard.
|
||||
The above configuration links `ping` to the `http` entry point and exposes it on port `80`.
|
||||
|
||||
### Enable ping health check on dedicated port
|
||||
|
||||
If you do not want to or cannot expose the health-check on a regular entrypoint - e.g. your security rules do not allow it, or you have a conflicting path - then you can enable health-check on its own entrypoint.
|
||||
Use the following config:
|
||||
If you do not want to or cannot expose the health-check on a regular entry point - e.g. your security rules do not allow it, or you have a conflicting path - then you can enable health-check on its own entry point.
|
||||
Use the following configuration:
|
||||
|
||||
```toml
|
||||
defaultEntryPoints = ["http"]
|
||||
@@ -390,32 +385,18 @@ defaultEntryPoints = ["http"]
|
||||
[entryPoints.ping]
|
||||
address = ":8082"
|
||||
|
||||
[backends]
|
||||
[backends.traefik]
|
||||
[backends.traefik.servers.server1]
|
||||
url = "http://localhost:8080"
|
||||
weight = 10
|
||||
|
||||
[frontends]
|
||||
[frontends.traefikadmin]
|
||||
backend = "traefik"
|
||||
entrypoints = ["ping"]
|
||||
[frontends.traefikadmin.routes.ping]
|
||||
rule = "Path:/ping"
|
||||
[ping]
|
||||
entryPoint = "ping"
|
||||
```
|
||||
|
||||
The above is similar to the previous example, but instead of enabling `/ping` on the _default_ entrypoint, we enable it on a _dedicated_ entrypoint.
|
||||
The above is similar to the previous example, but instead of enabling `/ping` on the _default_ entry point, we enable it on a _dedicated_ entry point.
|
||||
|
||||
In the above example, you would access a regular path, admin panel and health-check as follows:
|
||||
In the above example, you would access a regular path and health-check as follows:
|
||||
|
||||
* Regular path: `http://hostname:80/foo`
|
||||
* Admin panel: `http://hostname:8080/`
|
||||
* Ping URL: `http://hostname:8082/ping`
|
||||
|
||||
Note the dedicated port `:8082` for `/ping`.
|
||||
|
||||
In the above example, it is _very_ important to create a named dedicated entrypoint, and do **not** include it in `defaultEntryPoints`.
|
||||
Otherwise, you are likely to expose _all_ services via that entrypoint.
|
||||
|
||||
In the above example, we have two entrypoints, `http` and `ping`, but we only included `http` in `defaultEntryPoints`, while explicitly tying `frontend.traefikadmin` to the `ping` entrypoint.
|
||||
This ensures that all the "normal" frontends will be exposed via entrypoint `http` and _not_ via entrypoint `ping`.
|
||||
In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
|
||||
Otherwise, you are likely to expose _all_ services via this entry point.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Key-value store configuration
|
||||
|
||||
Both [static global configuration](/user-guide/kv-config/#static-configuration-in-key-value-store) and [dynamic](/user-guide/kv-config/#dynamic-configuration-in-key-value-store) configuration can be sorted in a Key-value store.
|
||||
Both [static global configuration](/user-guide/kv-config/#static-configuration-in-key-value-store) and [dynamic](/user-guide/kv-config/#dynamic-configuration-in-key-value-store) configuration can be stored in a Key-value store.
|
||||
|
||||
This section explains how to launch Træfik using a configuration loaded from a Key-value store.
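For example, pointing Træfik at a Consul KV store can be as small as the following sketch (endpoint and prefix are assumptions):

```toml
[consul]
  endpoint = "127.0.0.1:8500"
  watch = true
  prefix = "traefik"
```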
|
||||
|
||||
|
||||
@@ -29,6 +29,8 @@ services :
|
||||
- bhsm
|
||||
- bmysql
|
||||
- brabbitmq
|
||||
volumes:
|
||||
- "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro"
|
||||
|
||||
bhsm:
|
||||
image: letsencrypt/boulder-tools:2016-11-02
|
||||
|
||||
42
examples/acme/rate-limit-policies.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
totalCertificates:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerName:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
ratelimit.me: 1
|
||||
lim.it: 0
|
||||
# Hostnames used by the letsencrypt client integration test.
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
nginx.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
bad-caa-reserved.com: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
registrationOverrides:
|
||||
101: 1000
|
||||
registrationsPerIP:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
127.0.0.1: 1000000
|
||||
pendingAuthorizationsPerAccount:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerFQDNSet:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
le.wtf,le1.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
nginx.wtf: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
@@ -73,6 +73,8 @@ services:
|
||||
- bhsm
|
||||
- bmysql
|
||||
- brabbitmq
|
||||
volumes:
|
||||
- "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro"
|
||||
networks:
|
||||
net:
|
||||
ipv4_address: 10.0.1.3
|
||||
|
||||
42
examples/cluster/rate-limit-policies.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
totalCertificates:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerName:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
ratelimit.me: 1
|
||||
lim.it: 0
|
||||
# Hostnames used by the letsencrypt client integration test.
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
nginx.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
bad-caa-reserved.com: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
registrationOverrides:
|
||||
101: 1000
|
||||
registrationsPerIP:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
127.0.0.1: 1000000
|
||||
pendingAuthorizationsPerAccount:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerFQDNSet:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
le.wtf,le1.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
nginx.wtf: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
@@ -19,8 +19,7 @@ caServer = "http://traefik.boulder.com:4000/directory"
|
||||
entryPoint="http"
|
||||
|
||||
|
||||
[web]
|
||||
address = ":8080"
|
||||
[api]
|
||||
|
||||
[docker]
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
|
||||
@@ -142,6 +142,19 @@ func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificateWithDynamicWildcard(c *
|
||||
s.retrieveAcmeCertificate(c, testCase)
|
||||
}
|
||||
|
||||
// Test with Let's Encrypt down
|
||||
func (s *AcmeSuite) TestNoValidLetsEncryptServer(c *check.C) {
|
||||
cmd, display := s.traefikCmd(withConfigFile("fixtures/acme/wrong_acme.toml"))
|
||||
defer display(c)
|
||||
err := cmd.Start()
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// Expect Traefik to be up and serving the API
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
// Doing an HTTPS request and test the response certificate
|
||||
func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase AcmeTestCase) {
|
||||
file := s.adaptFile(c, testCase.traefikConfFilePath, struct {
|
||||
|
||||
@@ -358,3 +358,31 @@ func (s *SimpleSuite) TestMetricsPrometheusDefaultEntrypoint(c *check.C) {
|
||||
err = try.GetRequest("http://127.0.0.1:8080/metrics", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
func (s *SimpleSuite) TestMultipleProviderSameBackendName(c *check.C) {
|
||||
|
||||
s.createComposeProject(c, "base")
|
||||
s.composeProject.Start(c)
|
||||
ipWhoami01 := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
|
||||
ipWhoami02 := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress
|
||||
file := s.adaptFile(c, "fixtures/multiple_provider.toml", struct{ IP string }{
|
||||
IP: ipWhoami02,
|
||||
})
|
||||
defer os.Remove(file)
|
||||
cmd, output := s.traefikCmd(withConfigFile(file))
|
||||
defer output(c)
|
||||
|
||||
err := cmd.Start()
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.BodyContains(ipWhoami01))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8000/file", 1*time.Second, try.BodyContains(ipWhoami02))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
}
|
||||
|
||||
@@ -91,11 +91,11 @@ func (s *ConstraintSuite) TestMatchConstraintGlobal(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -117,11 +117,11 @@ func (s *ConstraintSuite) TestDoesNotMatchConstraintGlobal(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -143,11 +143,11 @@ func (s *ConstraintSuite) TestMatchConstraintProvider(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -169,11 +169,11 @@ func (s *ConstraintSuite) TestDoesNotMatchConstraintProvider(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -196,11 +196,11 @@ func (s *ConstraintSuite) TestMatchMultipleConstraint(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=eu-1"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=eu-1"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -223,11 +223,11 @@ func (s *ConstraintSuite) TestDoesNotMatchMultipleConstraint(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx")
|
||||
whoami := s.composeProject.Container(c, "whoami")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=us-1"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=us-1"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
@@ -145,9 +145,9 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
|
||||
err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
@@ -157,7 +157,7 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
|
||||
err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody())
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
@@ -175,11 +175,11 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C)
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -201,16 +201,16 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSimpleServiceMultipleNode(
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
nginx2 := s.composeProject.Container(c, "nginx2")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
whoami2 := s.composeProject.Container(c, "whoami2")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
err = s.registerService("test", nginx2.NetworkSettings.IPAddress, 80, []string{"traefik.enable=true"})
|
||||
err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"traefik.enable=true"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx2.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -232,16 +232,16 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
nginx2 := s.composeProject.Container(c, "nginx2")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
whoami2 := s.composeProject.Container(c, "whoami2")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
err = s.registerService("test", nginx2.NetworkSettings.IPAddress, 80, []string{"name=nginx2"})
|
||||
err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx2.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -250,7 +250,7 @@ func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c
|
||||
err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1", "nginx2"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1", "whoami2"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
}
|
||||
@@ -267,16 +267,16 @@ func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
nginx2 := s.composeProject.Container(c, "nginx2")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
whoami2 := s.composeProject.Container(c, "whoami2")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
err = s.registerAgentService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1"})
|
||||
err = s.registerAgentService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering agent service"))
|
||||
defer s.deregisterAgentService(nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterAgentService(whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -285,25 +285,25 @@ func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck
|
||||
err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = s.registerService("test", nginx2.NetworkSettings.IPAddress, 80, []string{"name=nginx2"})
|
||||
err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1", "nginx2"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1", "whoami2"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
s.deregisterService("test", nginx2.NetworkSettings.IPAddress)
|
||||
s.deregisterService("test", whoami2.NetworkSettings.IPAddress)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = s.registerService("test", nginx2.NetworkSettings.IPAddress, 80, []string{"name=nginx2"})
|
||||
err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx2.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1", "nginx2"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1", "whoami2"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
}
|
||||
@@ -320,13 +320,13 @@ func (s *ConsulCatalogSuite) TestBasicAuthSimpleService(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{
|
||||
"traefik.frontend.auth.basic=test:$2a$06$O5NksJPAcgrC9MuANkSoE.Xe9DSg7KcLLFYNr1Lj6hPcMmvgwxhme,test2:$2y$10$xP1SZ70QbZ4K2bTGKJOhpujkpcLxQcB3kEPF6XAV19IdcqsZTyDEe",
|
||||
})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -357,16 +357,16 @@ func (s *ConsulCatalogSuite) TestRefreshConfigTagChange(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1", "traefik.enable=false", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1", "traefik.enable=false", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.NotNil)
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1", "traefik.enable=true", "traefik.backend.circuitbreaker=ResponseCodeRatio(500, 600, 0, 600) > 0.5"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1", "traefik.enable=true", "traefik.backend.circuitbreaker=ResponseCodeRatio(500, 600, 0, 600) > 0.5"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
@@ -376,7 +376,7 @@ func (s *ConsulCatalogSuite) TestRefreshConfigTagChange(c *check.C) {
|
||||
err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
@@ -397,19 +397,19 @@ func (s *ConsulCatalogSuite) TestCircuitBreaker(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
nginx2 := s.composeProject.Container(c, "nginx2")
|
||||
nginx3 := s.composeProject.Container(c, "nginx3")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
whoami2 := s.composeProject.Container(c, "whoami2")
|
||||
whoami3 := s.composeProject.Container(c, "whoami3")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
err = s.registerService("test", nginx2.NetworkSettings.IPAddress, 42, []string{"name=nginx2", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 42, []string{"name=whoami2", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx2.NetworkSettings.IPAddress)
|
||||
err = s.registerService("test", nginx3.NetworkSettings.IPAddress, 42, []string{"name=nginx3", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress)
|
||||
err = s.registerService("test", whoami3.NetworkSettings.IPAddress, 42, []string{"name=whoami3", "traefik.enable=true", "traefik.backend.circuitbreaker=NetworkErrorRatio() > 0.5"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
defer s.deregisterService("test", nginx3.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami3.NetworkSettings.IPAddress)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -432,9 +432,9 @@ func (s *ConsulCatalogSuite) TestRefreshConfigPortChange(c *check.C) {
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 81, []string{"name=nginx1", "traefik.enable=true"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 81, []string{"name=whoami1", "traefik.enable=true"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||
@@ -444,15 +444,15 @@ func (s *ConsulCatalogSuite) TestRefreshConfigPortChange(c *check.C) {
|
||||
err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusBadGateway))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{"name=nginx1", "traefik.enable=true"})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1", "traefik.enable=true"})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||
defer s.deregisterService("test", whoami.NetworkSettings.IPAddress)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("nginx1"))
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains("whoami1"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||
@@ -491,9 +491,9 @@ func (s *ConsulCatalogSuite) TestRetryWithConsulServer(c *check.C) {
|
||||
s.composeProject.Scale(c, "consul", 1)
|
||||
s.waitToElectConsulLeader()
|
||||
|
||||
nginx := s.composeProject.Container(c, "nginx1")
|
||||
whoami := s.composeProject.Container(c, "whoami1")
|
||||
// Register service
|
||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||
err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{})
|
||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||
|
||||
// Provider consul catalog should be present
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
"github.com/docker/docker/pkg/namesgenerator"
|
||||
"github.com/go-check/check"
|
||||
d "github.com/libkermit/docker"
|
||||
docker "github.com/libkermit/docker-check"
|
||||
"github.com/libkermit/docker-check"
|
||||
checker "github.com/vdemeester/shakers"
|
||||
)
|
||||
|
||||
@@ -25,8 +25,8 @@ var (
|
||||
// Images to have or pull before the build in order to make it work
|
||||
// FIXME handle this offline but loading them before build
|
||||
RequiredImages = map[string]string{
|
||||
"swarm": "1.0.0",
|
||||
"nginx": "1",
|
||||
"swarm": "1.0.0",
|
||||
"emilevauge/whoami": "latest",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
34
integration/fixtures/acme/wrong_acme.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
logLevel = "DEBUG"
|
||||
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[api]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":8081"
|
||||
[entryPoints.https]
|
||||
address = ":5001"
|
||||
[entryPoints.https.tls]
|
||||
|
||||
|
||||
[acme]
|
||||
email = "test@traefik.io"
|
||||
storage = "/dev/null"
|
||||
entryPoint = "https"
|
||||
OnHostRule = true
|
||||
caServer = "http://wrongurl:4000/directory"
|
||||
|
||||
[file]
|
||||
|
||||
[backends]
|
||||
[backends.backend]
|
||||
[backends.backend.servers.server1]
|
||||
url = "http://127.0.0.1:9010"
|
||||
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend]
|
||||
backend = "backend"
|
||||
[frontends.frontend.routes.test]
|
||||
rule = "Host:traefik.acme.wtf"
|
||||
25
integration/fixtures/multiple_provider.toml
Normal file
@@ -0,0 +1,25 @@
|
||||
defaultEntryPoints = ["http"]
|
||||
|
||||
debug=true
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":8000"
|
||||
|
||||
[api]
|
||||
|
||||
[docker]
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
watch = true
|
||||
exposedbydefault = false
|
||||
|
||||
[file]
|
||||
[frontends]
|
||||
[frontends.frontend-1]
|
||||
backend = "backend-test"
|
||||
[frontends.frontend-1.routes.test_1]
|
||||
rule = "PathPrefix:/file"
|
||||
[backends]
|
||||
[backends.backend-test]
|
||||
[backends.backend-test.servers.website]
|
||||
url = "http://{{ .IP }}"
|
||||
@@ -19,7 +19,7 @@ func (s *RateLimitSuite) SetUpSuite(c *check.C) {
|
||||
s.createComposeProject(c, "ratelimit")
|
||||
s.composeProject.Start(c)
|
||||
|
||||
s.ServerIP = s.composeProject.Container(c, "nginx1").NetworkSettings.IPAddress
|
||||
s.ServerIP = s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress
|
||||
}
|
||||
|
||||
func (s *RateLimitSuite) TestSimpleConfiguration(c *check.C) {
|
||||
|
||||
@@ -3,3 +3,9 @@ whoami1:
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.frontend.rule=PathPrefix:/whoami
|
||||
- traefik.backend="test"
|
||||
|
||||
whoami2:
|
||||
image: emilevauge/whoami
|
||||
labels:
|
||||
- traefik.enable=false
|
||||
@@ -11,7 +11,7 @@ consul:
|
||||
- "8301/udp"
|
||||
- "8302"
|
||||
- "8302/udp"
|
||||
nginx:
|
||||
image: nginx:alpine
|
||||
whoami:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8881:80"
|
||||
|
||||
@@ -11,9 +11,9 @@ consul:
|
||||
- "8301/udp"
|
||||
- "8302"
|
||||
- "8302/udp"
|
||||
nginx1:
|
||||
image: nginx:alpine
|
||||
nginx2:
|
||||
image: nginx:alpine
|
||||
nginx3:
|
||||
image: nginx:alpine
|
||||
whoami1:
|
||||
image: emilevauge/whoami
|
||||
whoami2:
|
||||
image: emilevauge/whoami
|
||||
whoami3:
|
||||
image: emilevauge/whoami
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
nginx1:
|
||||
image: nginx:alpine
|
||||
image: nginx:1.13.8-alpine
|
||||
nginx2:
|
||||
image: nginx:alpine
|
||||
image: nginx:1.13.8-alpine
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
nginx1:
|
||||
image: nginx:alpine
|
||||
whoami1:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8881:80"
|
||||
nginx2:
|
||||
image: nginx:alpine
|
||||
whoami2:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8882:80"
|
||||
nginx3:
|
||||
image: nginx:alpine
|
||||
whoami3:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8883:80"
|
||||
nginx4:
|
||||
image: nginx:alpine
|
||||
whoami4:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8884:80"
|
||||
nginx5:
|
||||
image: nginx:alpine
|
||||
whoami5:
|
||||
image: emilevauge/whoami
|
||||
ports:
|
||||
- "8885:80"
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
nginx1:
|
||||
image: nginx:alpine
|
||||
whoami1:
|
||||
image: emilevauge/whoami
|
||||
|
||||
@@ -95,6 +95,25 @@ func taskSlot(slot int) func(*swarm.Task) {
|
||||
}
|
||||
}
|
||||
|
||||
func taskNetworkAttachment(id string, name string, driver string, addresses []string) func(*swarm.Task) {
|
||||
return func(task *swarm.Task) {
|
||||
task.NetworksAttachments = append(task.NetworksAttachments, swarm.NetworkAttachment{
|
||||
Network: swarm.Network{
|
||||
ID: id,
|
||||
Spec: swarm.NetworkSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: name,
|
||||
},
|
||||
DriverConfiguration: &swarm.Driver{
|
||||
Name: driver,
|
||||
},
|
||||
},
|
||||
},
|
||||
Addresses: addresses,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func taskStatus(ops ...func(*swarm.TaskStatus)) func(*swarm.Task) {
|
||||
return func(task *swarm.Task) {
|
||||
status := &swarm.TaskStatus{}
|
||||
@@ -113,6 +132,14 @@ func taskState(state swarm.TaskState) func(*swarm.TaskStatus) {
|
||||
}
|
||||
}
|
||||
|
||||
func taskContainerStatus(id string) func(*swarm.TaskStatus) {
|
||||
return func(status *swarm.TaskStatus) {
|
||||
status.ContainerStatus = swarm.ContainerStatus{
|
||||
ContainerID: id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func swarmService(ops ...func(*swarm.Service)) swarm.Service {
|
||||
service := &swarm.Service{
|
||||
ID: "serviceID",
|
||||
|
||||
@@ -777,19 +777,21 @@ func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerD
|
||||
|
||||
for _, service := range serviceList {
|
||||
dockerData := parseService(service, networkMap)
|
||||
if len(dockerData.NetworkSettings.Networks) > 0 {
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
|
||||
if useSwarmLB {
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
|
||||
if useSwarmLB {
|
||||
if len(dockerData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dockerData)
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dockerData, networkMap, isGlobalSvc)
|
||||
|
||||
for _, dockerDataTask := range dockerDataListTasks {
|
||||
dockerDataList = append(dockerDataList, dockerDataTask)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dockerData, networkMap, isGlobalSvc)
|
||||
|
||||
for _, dockerDataTask := range dockerDataListTasks {
|
||||
dockerDataList = append(dockerDataList, dockerDataTask)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return dockerDataList, err
|
||||
@@ -805,7 +807,10 @@ func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes
|
||||
|
||||
if service.Spec.EndpointSpec != nil {
|
||||
if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
|
||||
log.Warnf("Ignored endpoint-mode not supported, service name: %s", service.Spec.Annotations.Name)
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
if useSwarmLB {
|
||||
log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Træfik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
|
||||
}
|
||||
} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
|
||||
dockerData.NetworkSettings.Networks = make(map[string]*networkData)
|
||||
for _, virtualIP := range service.Endpoint.VirtualIPs {
|
||||
@@ -844,7 +849,9 @@ func listTasks(ctx context.Context, dockerClient client.APIClient, serviceID str
|
||||
continue
|
||||
}
|
||||
dockerData := parseTasks(task, serviceDockerData, networkMap, isGlobalSvc)
|
||||
dockerDataList = append(dockerDataList, dockerData)
|
||||
if len(dockerData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dockerData)
|
||||
}
|
||||
}
|
||||
return dockerDataList, err
|
||||
}
|
||||
|
||||
@@ -709,14 +709,19 @@ func TestSwarmTaskParsing(t *testing.T) {
|
||||
|
||||
type fakeTasksClient struct {
|
||||
dockerclient.APIClient
|
||||
tasks []swarm.Task
|
||||
err error
|
||||
tasks []swarm.Task
|
||||
container dockertypes.ContainerJSON
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.tasks, c.err
|
||||
}
|
||||
|
||||
func (c *fakeTasksClient) ContainerInspect(ctx context.Context, container string) (dockertypes.ContainerJSON, error) {
|
||||
return c.container, c.err
|
||||
}
|
||||
|
||||
func TestListTasks(t *testing.T) {
|
||||
testCases := []struct {
|
||||
service swarm.Service
|
||||
@@ -728,11 +733,30 @@ func TestListTasks(t *testing.T) {
|
||||
{
|
||||
service: swarmService(serviceName("container")),
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1", taskSlot(1), taskStatus(taskState(swarm.TaskStateRunning))),
|
||||
swarmTask("id2", taskSlot(2), taskStatus(taskState(swarm.TaskStatePending))),
|
||||
swarmTask("id3", taskSlot(3)),
|
||||
swarmTask("id4", taskSlot(4), taskStatus(taskState(swarm.TaskStateRunning))),
|
||||
swarmTask("id5", taskSlot(5), taskStatus(taskState(swarm.TaskStateFailed))),
|
||||
swarmTask("id1",
|
||||
taskSlot(1),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.1"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
swarmTask("id2",
|
||||
taskSlot(2),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.2"}),
|
||||
taskStatus(taskState(swarm.TaskStatePending)),
|
||||
),
|
||||
swarmTask("id3",
|
||||
taskSlot(3),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.3"}),
|
||||
),
|
||||
swarmTask("id4",
|
||||
taskSlot(4),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.4"}),
|
||||
taskStatus(taskState(swarm.TaskStateRunning)),
|
||||
),
|
||||
swarmTask("id5",
|
||||
taskSlot(5),
|
||||
taskNetworkAttachment("1", "network1", "overlay", []string{"127.0.0.5"}),
|
||||
taskStatus(taskState(swarm.TaskStateFailed)),
|
||||
),
|
||||
},
|
||||
isGlobalSVC: false,
|
||||
expectedTasks: []string{
|
||||
@@ -753,7 +777,7 @@ func TestListTasks(t *testing.T) {
|
||||
t.Parallel()
|
||||
dockerData := parseService(test.service, test.networks)
|
||||
dockerClient := &fakeTasksClient{tasks: test.tasks}
|
||||
taskDockerData, _ := listTasks(context.Background(), dockerClient, test.service.ID, dockerData, map[string]*docker.NetworkResource{}, test.isGlobalSVC)
|
||||
taskDockerData, _ := listTasks(context.Background(), dockerClient, test.service.ID, dockerData, test.networks, test.isGlobalSVC)
            if len(test.expectedTasks) != len(taskDockerData) {
                t.Errorf("expected tasks %v, got %v", spew.Sdump(test.expectedTasks), spew.Sdump(taskDockerData))
@@ -773,6 +797,7 @@ type fakeServicesClient struct {
    dockerVersion string
    networks      []dockertypes.NetworkResource
    services      []swarm.Service
    tasks         []swarm.Task
    err           error
}

@@ -788,10 +813,15 @@ func (c *fakeServicesClient) NetworkList(ctx context.Context, options dockertype
    return c.networks, c.err
}

func (c *fakeServicesClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
    return c.tasks, c.err
}

func TestListServices(t *testing.T) {
    testCases := []struct {
        desc             string
        services         []swarm.Service
        tasks            []swarm.Task
        dockerVersion    string
        networks         []dockertypes.NetworkResource
        expectedServices []string
@@ -813,7 +843,8 @@ func TestListServices(t *testing.T) {
            swarmService(
                serviceName("service2"),
                serviceLabels(map[string]string{
                    labelDockerNetwork: "barnet",
                    labelDockerNetwork:            "barnet",
                    labelBackendLoadBalancerSwarm: "true",
                }),
                withEndpointSpec(modeDNSSR)),
        },
@@ -838,7 +869,8 @@ func TestListServices(t *testing.T) {
            swarmService(
                serviceName("service2"),
                serviceLabels(map[string]string{
                    labelDockerNetwork: "barnet",
                    labelDockerNetwork:            "barnet",
                    labelBackendLoadBalancerSwarm: "true",
                }),
                withEndpointSpec(modeDNSSR)),
        },
@@ -867,14 +899,74 @@ func TestListServices(t *testing.T) {
                "service1",
            },
        },
        {
            desc: "Should return service1 and service2",
            services: []swarm.Service{
                swarmService(
                    serviceName("service1"),
                    serviceLabels(map[string]string{
                        labelDockerNetwork: "barnet",
                    }),
                    withEndpointSpec(modeVIP),
                    withEndpoint(
                        virtualIP("yk6l57rfwizjzxxzftn4amaot", "10.11.12.13/24"),
                        virtualIP("2", "10.11.12.99/24"),
                    )),
                swarmService(
                    serviceName("service2"),
                    serviceLabels(map[string]string{
                        labelDockerNetwork: "barnet",
                    }),
                    withEndpointSpec(modeDNSSR)),
            },
            tasks: []swarm.Task{
                swarmTask("id1",
                    taskNetworkAttachment("yk6l57rfwizjzxxzftn4amaot", "network_name", "overlay", []string{"127.0.0.1"}),
                    taskStatus(taskState(swarm.TaskStateRunning)),
                ),
                swarmTask("id2",
                    taskNetworkAttachment("yk6l57rfwizjzxxzftn4amaot", "network_name", "overlay", []string{"127.0.0.1"}),
                    taskStatus(taskState(swarm.TaskStateRunning)),
                ),
            },
            dockerVersion: "1.30",
            networks: []dockertypes.NetworkResource{
                {
                    Name:       "network_name",
                    ID:         "yk6l57rfwizjzxxzftn4amaot",
                    Created:    time.Now(),
                    Scope:      "swarm",
                    Driver:     "overlay",
                    EnableIPv6: false,
                    Internal:   true,
                    Ingress:    false,
                    ConfigOnly: false,
                    Options: map[string]string{
                        "com.docker.network.driver.overlay.vxlanid_list": "4098",
                        "com.docker.network.enable_ipv6":                 "false",
                    },
                    Labels: map[string]string{
                        "com.docker.stack.namespace": "test",
                    },
                },
            },
            expectedServices: []string{
                "service1.0",
                "service1.0",
                "service2.0",
                "service2.0",
            },
        },
    }

    for caseID, test := range testCases {
        test := test
        t.Run(strconv.Itoa(caseID), func(t *testing.T) {
            t.Parallel()
            dockerClient := &fakeServicesClient{services: test.services, dockerVersion: test.dockerVersion, networks: test.networks}
            serviceDockerData, _ := listServices(context.Background(), dockerClient)
            dockerClient := &fakeServicesClient{services: test.services, tasks: test.tasks, dockerVersion: test.dockerVersion, networks: test.networks}

            serviceDockerData, err := listServices(context.Background(), dockerClient)
            assert.NoError(t, err)

            assert.Equal(t, len(test.expectedServices), len(serviceDockerData))
            for i, serviceName := range test.expectedServices {

@@ -55,12 +55,12 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
    safe.Go(func() {
        for t := range ticker.C {

            log.Debug("Refreshing Provider " + t.String())
            log.Debugf("Refreshing Provider %s", t.String())

            configuration, err := p.buildConfiguration()
            if err != nil {
                log.Errorf("Failed to refresh Provider configuration, error: %s", err)
                return
                continue
            }

            configurationChan <- types.ConfigMessage{

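The hunk above swaps "return" for "continue", so a single failed poll no longer stops the provider's refresh loop. A minimal, self-contained sketch of that pattern (hypothetical names, not the provider's actual code):

package main

import (
    "log"
    "time"
)

// buildConfiguration is a hypothetical stand-in for the provider's p.buildConfiguration().
func buildConfiguration() (string, error) { return "configuration", nil }

func main() {
    ticker := time.NewTicker(2 * time.Second)
    defer ticker.Stop()

    for t := range ticker.C {
        log.Printf("Refreshing Provider %s", t)

        configuration, err := buildConfiguration()
        if err != nil {
            log.Printf("Failed to refresh Provider configuration, error: %s", err)
            continue // keep polling; a return here would end the refresh loop for good
        }
        log.Printf("new configuration: %v", configuration)
    }
}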
@@ -28,14 +28,6 @@ echo $VERSION | git commit --file -
echo $VERSION | git tag -a $VERSION --file -
git push -q --follow-tags -u origin master > /dev/null 2>&1

# create docker image emilevauge/traefik (compatibility)
echo "Updating docker emilevauge/traefik image..."
docker login -u $DOCKER_USER -p $DOCKER_PASS
docker tag containous/traefik emilevauge/traefik:latest
docker push emilevauge/traefik:latest
docker tag emilevauge/traefik:latest emilevauge/traefik:${VERSION}
docker push emilevauge/traefik:${VERSION}

cd ..
rm -Rf traefik-library-image/

@@ -6,8 +6,6 @@ set -o nounset
echo "Prune dependencies"

dep prune

find vendor -name '*_test.go' -exec rm {} \;

find vendor -type f \( ! -iname 'licen[cs]e*' \
@@ -32,3 +30,28 @@ find vendor -type f \( ! -iname 'licen[cs]e*' \
     -a ! -iname '*.hpp' \
     -a ! -iname '*.hxx' \
     -a ! -iname '*.s' \) -exec rm -f {} +

find -type d \( -iname '*Godeps*' \) -exec rm -rf {} +

find vendor -type l \( ! -iname 'licen[cs]e*' \
     -a ! -iname '*notice*' \
     -a ! -iname '*patent*' \
     -a ! -iname '*copying*' \
     -a ! -iname '*unlicense*' \
     -a ! -iname '*copyright*' \
     -a ! -iname '*copyleft*' \
     -a ! -iname '*legal*' \
     -a ! -iname 'disclaimer*' \
     -a ! -iname 'third-party*' \
     -a ! -iname 'thirdparty*' \
     -a ! -iname '*.go' \
     -a ! -iname '*.c' \
     -a ! -iname '*.S' \
     -a ! -iname '*.cc' \
     -a ! -iname '*.cpp' \
     -a ! -iname '*.cxx' \
     -a ! -iname '*.h' \
     -a ! -iname '*.hh' \
     -a ! -iname '*.hpp' \
     -a ! -iname '*.hxx' \
     -a ! -iname '*.s' \) -exec rm -f {} +
@@ -484,6 +484,7 @@ func (s *serverEntryPoint) getCertificate(clientHello *tls.ClientHelloInfo) (*tl
                }
            }
        }
        log.Debugf("No certificate provided dynamically can check the domain %q, a per default certificate will be used.", domainToCheck)
    }
    return nil, nil
}
@@ -913,7 +914,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
    backendsHealthCheck := map[string]*healthcheck.BackendHealthCheck{}
    errorHandler := NewRecordingErrorHandler(middlewares.DefaultNetErrorRecorder{})

    for _, config := range configurations {
    for providerName, config := range configurations {
        frontendNames := sortedFrontendNamesForConfig(config)
    frontend:
        for _, frontendName := range frontendNames {
@@ -952,7 +953,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura

            entryPoint := globalConfiguration.EntryPoints[entryPointName]
            n := negroni.New()
            if entryPoint.Redirect != nil {
            if entryPoint.Redirect != nil && entryPointName != entryPoint.Redirect.EntryPoint {
                if redirectHandlers[entryPointName] != nil {
                    n.Use(redirectHandlers[entryPointName])
                } else if handler, err := s.buildRedirectHandler(entryPointName, entryPoint.Redirect); err != nil {
@@ -970,7 +971,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
                    }
                }
            }
            if backends[entryPointName+frontend.Backend] == nil {
            if backends[entryPointName+providerName+frontend.Backend] == nil {
                log.Debugf("Creating backend %s", frontend.Backend)

                roundTripper, err := s.getRoundTripper(entryPointName, globalConfiguration, frontend.PassTLSCert, entryPoint.TLS)
@@ -1141,7 +1142,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
                log.Infof("Configured IP Whitelists: %s", frontend.WhitelistSourceRange)
            }

            if frontend.Redirect != nil {
            if frontend.Redirect != nil && entryPointName != frontend.Redirect.EntryPoint {
                rewrite, err := s.buildRedirectHandler(entryPointName, frontend.Redirect)
                if err != nil {
                    log.Errorf("Error creating Frontend Redirect: %v", err)
@@ -1191,14 +1192,14 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
                } else {
                    n.UseHandler(lb)
                }
                backends[entryPointName+frontend.Backend] = n
                backends[entryPointName+providerName+frontend.Backend] = n
            } else {
                log.Debugf("Reusing backend %s", frontend.Backend)
            }
            if frontend.Priority > 0 {
                newServerRoute.route.Priority(frontend.Priority)
            }
            s.wireFrontendBackend(newServerRoute, backends[entryPointName+frontend.Backend])
            s.wireFrontendBackend(newServerRoute, backends[entryPointName+providerName+frontend.Backend])

            err := newServerRoute.route.GetError()
            if err != nil {
@@ -1347,7 +1348,7 @@ func (s *Server) buildRedirect(entryPointName string) (string, string, error) {
        protocol = "https"
    }

    replacement := protocol + "://$1" + match[0] + "$2"
    replacement := protocol + "://${1}" + match[0] + "${2}"
    return defaultRedirectRegex, replacement, nil
}

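The loadConfig hunks above add providerName to the key of the backends map. A small illustrative sketch (hypothetical names, not Traefik's actual types) of what the extra key component buys: two providers may both declare a backend called "web", and keying only by entry point and backend name would make the second one silently reuse the first one's handler.

package main

import "fmt"

// backendKey mirrors the entryPointName+providerName+frontend.Backend concatenation above.
func backendKey(entryPoint, provider, backend string) string {
    return entryPoint + provider + backend
}

func main() {
    backends := map[string]string{}
    backends[backendKey("http", "docker", "web")] = "handler built for docker/web"
    backends[backendKey("http", "file", "web")] = "handler built for file/web"

    // With the provider name in the key the two backends coexist; keyed by
    // entryPoint+backend alone, the second insert would overwrite the first.
    fmt.Println(len(backends)) // prints 2
}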
@@ -1071,7 +1071,7 @@ func TestServerBuildRedirect(t *testing.T) {
                    "https": &configuration.EntryPoint{Address: ":443", TLS: &tls.TLS{}},
                },
            },
            expectedReplacement: "https://$1:443$2",
            expectedReplacement: "https://${1}:443${2}",
        },
        {
            desc: "Redirect endpoint http to http02 with HTTP protocol",
@@ -1082,7 +1082,7 @@ func TestServerBuildRedirect(t *testing.T) {
                    "http02": &configuration.EntryPoint{Address: ":88"},
                },
            },
            expectedReplacement: "http://$1:88$2",
            expectedReplacement: "http://${1}:88${2}",
        },
        {
            desc: "Redirect endpoint to non-existent entry point",

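The test expectations above change "$1" to the brace form "${1}". In Go's regexp replacement syntax a group reference after "$" is taken to be as long as possible, so the braces make the boundary of the reference explicit no matter what text follows it. A small sketch with a hypothetical pattern (not Traefik's actual defaultRedirectRegex):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // Hypothetical redirect pattern: capture the host and the rest of the URL.
    re := regexp.MustCompile(`^(?:http://)?([^/:]+)(?::\d+)?(.*)$`)

    out := re.ReplaceAllString("http://example.com/foo", "https://${1}:443${2}")
    fmt.Println(out) // https://example.com:443/foo
}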
@@ -95,7 +95,8 @@ func (c *Certificates) CreateTLSConfig(entryPointName string) (*tls.Config, map[
    for _, certificate := range *c {
        err := certificate.AppendCertificates(domainsCertificates, entryPointName)
        if err != nil {
            return nil, nil, err
            log.Errorf("Unable to add a certificate to the entryPoint %q : %v", entryPointName, err)
            continue
        }
        for _, certDom := range domainsCertificates {
            for _, cert := range certDom.Get().(map[string]*tls.Certificate) {
@@ -127,16 +128,16 @@ func (c *Certificate) AppendCertificates(certs map[string]*DomainsCertificates,

    certContent, err := c.CertFile.Read()
    if err != nil {
        return err
        return fmt.Errorf("unable to read CertFile : %v", err)
    }

    keyContent, err := c.KeyFile.Read()
    if err != nil {
        return err
        return fmt.Errorf("unable to read KeyFile : %v", err)
    }
    tlsCert, err := tls.X509KeyPair(certContent, keyContent)
    if err != nil {
        return err
        return fmt.Errorf("unable to generate TLS certificate : %v", err)
    }

    parsedCert, _ := x509.ParseCertificate(tlsCert.Certificate[0])

20
vendor/cloud.google.com/go/cloud.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package cloud is the root of the packages used to access Google Cloud
|
||||
// Services. See https://godoc.org/cloud.google.com/go for a full list
|
||||
// of sub-packages.
|
||||
//
|
||||
// This package documents how to authorize and authenticate the sub packages.
|
||||
package cloud // import "cloud.google.com/go"
|
||||
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
||||
14
vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
||||
14
vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
Version 2, December 2004
|
||||
|
||||
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim or modified
|
||||
copies of this license document, and changing it is allowed as long
|
||||
as the name is changed.
|
||||
|
||||
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. You just DO WHAT THE FUCK YOU WANT TO.
|
||||
|
||||
13
vendor/github.com/JamesClonk/vultr/vultr.go
generated
vendored
@@ -1,13 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/JamesClonk/vultr/cmd"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cli := cmd.NewCLI()
|
||||
cli.RegisterCommands()
|
||||
cli.Run(os.Args)
|
||||
}
|
||||
27
vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
7
vendor/github.com/aws/aws-sdk-go/sdk.go
generated
vendored
@@ -1,7 +0,0 @@
|
||||
// Package sdk is the official AWS SDK for the Go programming language.
|
||||
//
|
||||
// See our Developer Guide for information for on getting started and using
|
||||
// the SDK.
|
||||
//
|
||||
// https://github.com/aws/aws-sdk-go/wiki
|
||||
package sdk
|
||||
5
vendor/github.com/aws/aws-sdk-go/service/generate.go
generated
vendored
@@ -1,5 +0,0 @@
|
||||
// Package service contains automatically generated AWS clients.
|
||||
package service
|
||||
|
||||
//go:generate go run -tags codegen ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json
|
||||
//go:generate gofmt -s -w ../service
|
||||
400
vendor/github.com/containous/flaeg/flaeg.go
generated
vendored
@@ -12,64 +12,63 @@ import (
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/containous/flaeg/parse"
|
||||
flag "github.com/ogier/pflag"
|
||||
)
|
||||
|
||||
// ErrParserNotFound is thrown when a field is flaged but not parser match its type
|
||||
var ErrParserNotFound = errors.New("Parser not found or custom parser missing")
|
||||
var ErrParserNotFound = errors.New("parser not found or custom parser missing")
|
||||
|
||||
// GetTypesRecursive links in flagmap a flag with its reflect.StructField
|
||||
// GetTypesRecursive links in flagMap a flag with its reflect.StructField
|
||||
// You can whether provide objValue on a structure or a pointer to structure as first argument
|
||||
// Flags are genereted from field name or from StructTag
|
||||
func getTypesRecursive(objValue reflect.Value, flagmap map[string]reflect.StructField, key string) error {
|
||||
// Flags are generated from field name or from StructTag
|
||||
func getTypesRecursive(objValue reflect.Value, flagMap map[string]reflect.StructField, key string) error {
|
||||
name := key
|
||||
switch objValue.Kind() {
|
||||
case reflect.Struct:
|
||||
|
||||
for i := 0; i < objValue.NumField(); i++ {
|
||||
if objValue.Type().Field(i).Anonymous {
|
||||
if err := getTypesRecursive(objValue.Field(i), flagmap, name); err != nil {
|
||||
if err := getTypesRecursive(objValue.Field(i), flagMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(objValue.Type().Field(i).Tag.Get("description")) > 0 {
|
||||
fieldName := objValue.Type().Field(i).Name
|
||||
if !isExported(fieldName) {
|
||||
return fmt.Errorf("Field %s is an unexported field", fieldName)
|
||||
return fmt.Errorf("field %s is an unexported field", fieldName)
|
||||
}
|
||||
|
||||
name += objValue.Type().Name()
|
||||
if tag := objValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
|
||||
fieldName = tag
|
||||
}
|
||||
|
||||
if len(key) == 0 {
|
||||
//Lower Camel Case
|
||||
//name = strings.ToLower(string(fieldName[0])) + fieldName[1:]
|
||||
name = strings.ToLower(fieldName)
|
||||
} else {
|
||||
name = key + "." + strings.ToLower(fieldName)
|
||||
}
|
||||
if _, ok := flagmap[name]; ok {
|
||||
return errors.New("Tag already exists: " + name)
|
||||
}
|
||||
flagmap[name] = objValue.Type().Field(i)
|
||||
|
||||
if err := getTypesRecursive(objValue.Field(i), flagmap, name); err != nil {
|
||||
if _, ok := flagMap[name]; ok {
|
||||
return fmt.Errorf("tag already exists: %s", name)
|
||||
}
|
||||
flagMap[name] = objValue.Type().Field(i)
|
||||
|
||||
if err := getTypesRecursive(objValue.Field(i), flagMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if len(key) > 0 {
|
||||
field := flagmap[name]
|
||||
field := flagMap[name]
|
||||
field.Type = reflect.TypeOf(false)
|
||||
flagmap[name] = field
|
||||
flagMap[name] = field
|
||||
}
|
||||
|
||||
typ := objValue.Type().Elem()
|
||||
inst := reflect.New(typ).Elem()
|
||||
if err := getTypesRecursive(inst, flagmap, name); err != nil {
|
||||
|
||||
if err := getTypesRecursive(inst, flagMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
@@ -78,14 +77,15 @@ func getTypesRecursive(objValue reflect.Value, flagmap map[string]reflect.Struct
|
||||
return nil
|
||||
}
|
||||
|
||||
//GetPointerFlags returns flags on pointers
|
||||
// GetBoolFlags returns flags on pointers
|
||||
func GetBoolFlags(config interface{}) ([]string, error) {
|
||||
flagmap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(config), flagmap, ""); err != nil {
|
||||
flagMap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(config), flagMap, ""); err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
flags := make([]string, 0, len(flagmap))
|
||||
for f, structField := range flagmap {
|
||||
|
||||
flags := make([]string, 0, len(flagMap))
|
||||
for f, structField := range flagMap {
|
||||
if structField.Type.Kind() == reflect.Bool {
|
||||
flags = append(flags, f)
|
||||
}
|
||||
@@ -93,86 +93,42 @@ func GetBoolFlags(config interface{}) ([]string, error) {
|
||||
return flags, nil
|
||||
}
|
||||
|
||||
//GetFlags returns flags
|
||||
// GetFlags returns flags
|
||||
func GetFlags(config interface{}) ([]string, error) {
|
||||
flagmap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(config), flagmap, ""); err != nil {
|
||||
flagMap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(config), flagMap, ""); err != nil {
|
||||
return []string{}, err
|
||||
}
|
||||
flags := make([]string, 0, len(flagmap))
|
||||
for f := range flagmap {
|
||||
|
||||
flags := make([]string, 0, len(flagMap))
|
||||
for f := range flagMap {
|
||||
flags = append(flags, f)
|
||||
}
|
||||
return flags, nil
|
||||
}
|
||||
|
||||
//loadParsers loads default parsers and custom parsers given as parameter. Return a map [reflect.Type]parsers
|
||||
// bool, int, int64, uint, uint64, float64,
|
||||
func loadParsers(customParsers map[reflect.Type]Parser) (map[reflect.Type]Parser, error) {
|
||||
parsers := map[reflect.Type]Parser{}
|
||||
|
||||
var boolParser boolValue
|
||||
parsers[reflect.TypeOf(true)] = &boolParser
|
||||
|
||||
var intParser intValue
|
||||
parsers[reflect.TypeOf(1)] = &intParser
|
||||
|
||||
var int64Parser int64Value
|
||||
parsers[reflect.TypeOf(int64(1))] = &int64Parser
|
||||
|
||||
var uintParser uintValue
|
||||
parsers[reflect.TypeOf(uint(1))] = &uintParser
|
||||
|
||||
var uint64Parser uint64Value
|
||||
parsers[reflect.TypeOf(uint64(1))] = &uint64Parser
|
||||
|
||||
var stringParser stringValue
|
||||
parsers[reflect.TypeOf("")] = &stringParser
|
||||
|
||||
var float64Parser float64Value
|
||||
parsers[reflect.TypeOf(float64(1.5))] = &float64Parser
|
||||
|
||||
var durationParser Duration
|
||||
parsers[reflect.TypeOf(Duration(time.Second))] = &durationParser
|
||||
|
||||
var timeParser timeValue
|
||||
parsers[reflect.TypeOf(time.Now())] = &timeParser
|
||||
|
||||
for rType, parser := range customParsers {
|
||||
parsers[rType] = parser
|
||||
}
|
||||
return parsers, nil
|
||||
}
|
||||
|
||||
//ParseArgs : parses args return valmap map[flag]Getter, using parsers map[type]Getter
|
||||
//args must be formated as like as flag documentation. See https://golang.org/pkg/flag
|
||||
func parseArgs(args []string, flagmap map[string]reflect.StructField, parsers map[reflect.Type]Parser) (map[string]Parser, error) {
|
||||
//Return var
|
||||
valmap := make(map[string]Parser)
|
||||
//Visitor in flag.Parse
|
||||
flagList := []*flag.Flag{}
|
||||
visitor := func(fl *flag.Flag) {
|
||||
flagList = append(flagList, fl)
|
||||
}
|
||||
newParsers := map[string]Parser{}
|
||||
// ParseArgs : parses args return a map[flag]Getter, using parsers map[type]Getter
|
||||
// args must be formatted as like as flag documentation. See https://golang.org/pkg/flag
|
||||
func parseArgs(args []string, flagMap map[string]reflect.StructField, parsers map[reflect.Type]parse.Parser) (map[string]parse.Parser, error) {
|
||||
newParsers := map[string]parse.Parser{}
|
||||
flagSet := flag.NewFlagSet("flaeg.Load", flag.ContinueOnError)
|
||||
//Disable output
|
||||
|
||||
// Disable output
|
||||
flagSet.SetOutput(ioutil.Discard)
|
||||
|
||||
var err error
|
||||
for flag, structField := range flagmap {
|
||||
//for _, flag := range flags {
|
||||
//structField := flagmap[flag]
|
||||
for flg, structField := range flagMap {
|
||||
if parser, ok := parsers[structField.Type]; ok {
|
||||
newparserValue := reflect.New(reflect.TypeOf(parser).Elem())
|
||||
newparserValue.Elem().Set(reflect.ValueOf(parser).Elem())
|
||||
newparser := newparserValue.Interface().(Parser)
|
||||
newParserValue := reflect.New(reflect.TypeOf(parser).Elem())
|
||||
newParserValue.Elem().Set(reflect.ValueOf(parser).Elem())
|
||||
newParser := newParserValue.Interface().(parse.Parser)
|
||||
|
||||
if short := structField.Tag.Get("short"); len(short) == 1 {
|
||||
// fmt.Printf("short : %s long : %s\n", short, flag)
|
||||
flagSet.VarP(newparser, flag, short, structField.Tag.Get("description"))
|
||||
flagSet.VarP(newParser, flg, short, structField.Tag.Get("description"))
|
||||
} else {
|
||||
flagSet.Var(newparser, flag, structField.Tag.Get("description"))
|
||||
flagSet.Var(newParser, flg, structField.Tag.Get("description"))
|
||||
}
|
||||
newParsers[flag] = newparser
|
||||
newParsers[flg] = newParser
|
||||
} else {
|
||||
err = ErrParserNotFound
|
||||
}
|
||||
@@ -180,24 +136,35 @@ func parseArgs(args []string, flagmap map[string]reflect.StructField, parsers ma
|
||||
|
||||
// prevents case sensitivity issue
|
||||
args = argsToLower(args)
|
||||
if err := flagSet.Parse(args); err != nil {
|
||||
return nil, err
|
||||
if errParse := flagSet.Parse(args); errParse != nil {
|
||||
return nil, errParse
|
||||
}
|
||||
|
||||
//Fill flagList with parsed flags
|
||||
// Visitor in flag.Parse
|
||||
var flagList []*flag.Flag
|
||||
visitor := func(fl *flag.Flag) {
|
||||
flagList = append(flagList, fl)
|
||||
}
|
||||
|
||||
// Fill flagList with parsed flags
|
||||
flagSet.Visit(visitor)
|
||||
//Return parsers on parsed flag
|
||||
for _, flag := range flagList {
|
||||
valmap[flag.Name] = newParsers[flag.Name]
|
||||
|
||||
// Return var
|
||||
valMap := make(map[string]parse.Parser)
|
||||
|
||||
// Return parsers on parsed flag
|
||||
for _, flg := range flagList {
|
||||
valMap[flg.Name] = newParsers[flg.Name]
|
||||
}
|
||||
|
||||
return valmap, err
|
||||
return valMap, err
|
||||
}
|
||||
|
||||
func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Value, defaultValmap map[string]reflect.Value, key string) error {
|
||||
if defaultValue.Type() != defaultPointersValue.Type() {
|
||||
return fmt.Errorf("Parameters defaultValue and defaultPointersValue must be the same struct. defaultValue type : %s is not defaultPointersValue type : %s", defaultValue.Type().String(), defaultPointersValue.Type().String())
|
||||
return fmt.Errorf("parameters defaultValue and defaultPointersValue must be the same struct. defaultValue type: %s is not defaultPointersValue type: %s", defaultValue.Type().String(), defaultPointersValue.Type().String())
|
||||
}
|
||||
|
||||
name := key
|
||||
switch defaultValue.Kind() {
|
||||
case reflect.Struct:
|
||||
@@ -207,22 +174,19 @@ func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Va
|
||||
return err
|
||||
}
|
||||
} else if len(defaultValue.Type().Field(i).Tag.Get("description")) > 0 {
|
||||
name += defaultValue.Type().Name()
|
||||
fieldName := defaultValue.Type().Field(i).Name
|
||||
if tag := defaultValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
|
||||
fieldName = tag
|
||||
}
|
||||
|
||||
if len(key) == 0 {
|
||||
name = strings.ToLower(fieldName)
|
||||
} else {
|
||||
name = key + "." + strings.ToLower(fieldName)
|
||||
}
|
||||
|
||||
if defaultValue.Field(i).Kind() != reflect.Ptr {
|
||||
// if _, ok := defaultValmap[name]; ok {
|
||||
// return errors.New("Tag already exists: " + name)
|
||||
// }
|
||||
defaultValmap[name] = defaultValue.Field(i)
|
||||
// fmt.Printf("%s: got default value %+v\n", name, defaultValue.Field(i))
|
||||
}
|
||||
if err := getDefaultValue(defaultValue.Field(i), defaultPointersValue.Field(i), defaultValmap, name); err != nil {
|
||||
return err
|
||||
@@ -232,14 +196,14 @@ func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Va
|
||||
case reflect.Ptr:
|
||||
if !defaultPointersValue.IsNil() {
|
||||
if len(key) != 0 {
|
||||
//turn ptr fields to nil
|
||||
// turn ptr fields to nil
|
||||
defaultPointersNilValue, err := setPointersNil(defaultPointersValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defaultValmap[name] = defaultPointersNilValue
|
||||
// fmt.Printf("%s: got default value %+v\n", name, defaultPointersNilValue)
|
||||
}
|
||||
|
||||
if !defaultValue.IsNil() {
|
||||
if err := getDefaultValue(defaultValue.Elem(), defaultPointersValue.Elem(), defaultValmap, name); err != nil {
|
||||
return err
|
||||
@@ -253,8 +217,8 @@ func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Va
|
||||
instValue := reflect.New(defaultPointersValue.Type().Elem())
|
||||
if len(key) != 0 {
|
||||
defaultValmap[name] = instValue
|
||||
// fmt.Printf("%s: got default value %+v\n", name, instValue)
|
||||
}
|
||||
|
||||
if !defaultValue.IsNil() {
|
||||
if err := getDefaultValue(defaultValue.Elem(), instValue.Elem(), defaultValmap, name); err != nil {
|
||||
return err
|
||||
@@ -271,17 +235,18 @@ func getDefaultValue(defaultValue reflect.Value, defaultPointersValue reflect.Va
|
||||
return nil
|
||||
}
|
||||
|
||||
//objValue a reflect.Value of a not-nil pointer on a struct
|
||||
// objValue a reflect.Value of a not-nil pointer on a struct
|
||||
func setPointersNil(objValue reflect.Value) (reflect.Value, error) {
|
||||
if objValue.Kind() != reflect.Ptr {
|
||||
return objValue, fmt.Errorf("Parameters objValue must be a not-nil pointer on a struct, not a %s", objValue.Kind().String())
|
||||
return objValue, fmt.Errorf("parameters objValue must be a not-nil pointer on a struct, not a %s", objValue.Kind())
|
||||
} else if objValue.IsNil() {
|
||||
return objValue, fmt.Errorf("Parameters objValue must be a not-nil pointer")
|
||||
return objValue, errors.New("parameters objValue must be a not-nil pointer")
|
||||
} else if objValue.Elem().Kind() != reflect.Struct {
|
||||
// fmt.Printf("Parameters objValue must be a not-nil pointer on a struct, not a pointer on a %s\n", objValue.Elem().Kind().String())
|
||||
return objValue, nil
|
||||
}
|
||||
//Clone
|
||||
|
||||
// Clone
|
||||
starObjValue := objValue.Elem()
|
||||
nilPointersObjVal := reflect.New(starObjValue.Type())
|
||||
starNilPointersObjVal := nilPointersObjVal.Elem()
|
||||
@@ -295,39 +260,38 @@ func setPointersNil(objValue reflect.Value) (reflect.Value, error) {
|
||||
return nilPointersObjVal, nil
|
||||
}
|
||||
|
||||
//FillStructRecursive initialize a value of any taged Struct given by reference
|
||||
func fillStructRecursive(objValue reflect.Value, defaultPointerValmap map[string]reflect.Value, valmap map[string]Parser, key string) error {
|
||||
// FillStructRecursive initialize a value of any tagged Struct given by reference
|
||||
func fillStructRecursive(objValue reflect.Value, defaultPointerValMap map[string]reflect.Value, valMap map[string]parse.Parser, key string) error {
|
||||
name := key
|
||||
switch objValue.Kind() {
|
||||
case reflect.Struct:
|
||||
|
||||
for i := 0; i < objValue.Type().NumField(); i++ {
|
||||
if objValue.Type().Field(i).Anonymous {
|
||||
if err := fillStructRecursive(objValue.Field(i), defaultPointerValmap, valmap, name); err != nil {
|
||||
if err := fillStructRecursive(objValue.Field(i), defaultPointerValMap, valMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if len(objValue.Type().Field(i).Tag.Get("description")) > 0 {
|
||||
name += objValue.Type().Name()
|
||||
fieldName := objValue.Type().Field(i).Name
|
||||
if tag := objValue.Type().Field(i).Tag.Get("long"); len(tag) > 0 {
|
||||
fieldName = tag
|
||||
}
|
||||
|
||||
if len(key) == 0 {
|
||||
name = strings.ToLower(fieldName)
|
||||
} else {
|
||||
name = key + "." + strings.ToLower(fieldName)
|
||||
}
|
||||
// fmt.Println(name)
|
||||
if objValue.Field(i).Kind() != reflect.Ptr {
|
||||
|
||||
if val, ok := valmap[name]; ok {
|
||||
// fmt.Printf("%s : set def val\n", name)
|
||||
if objValue.Field(i).Kind() != reflect.Ptr {
|
||||
if val, ok := valMap[name]; ok {
|
||||
if err := setFields(objValue.Field(i), val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := fillStructRecursive(objValue.Field(i), defaultPointerValmap, valmap, name); err != nil {
|
||||
|
||||
if err := fillStructRecursive(objValue.Field(i), defaultPointerValMap, valMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -335,39 +299,38 @@ func fillStructRecursive(objValue reflect.Value, defaultPointerValmap map[string
|
||||
|
||||
case reflect.Ptr:
|
||||
if len(key) == 0 && !objValue.IsNil() {
|
||||
if err := fillStructRecursive(objValue.Elem(), defaultPointerValmap, valmap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return fillStructRecursive(objValue.Elem(), defaultPointerValMap, valMap, name)
|
||||
}
|
||||
|
||||
contains := false
|
||||
for flag := range valmap {
|
||||
for flg := range valMap {
|
||||
// TODO replace by regexp
|
||||
if strings.HasPrefix(flag, name+".") {
|
||||
if strings.HasPrefix(flg, name+".") {
|
||||
contains = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
needDefault := false
|
||||
if _, ok := valmap[name]; ok {
|
||||
needDefault = valmap[name].Get().(bool)
|
||||
if _, ok := valMap[name]; ok {
|
||||
needDefault = valMap[name].Get().(bool)
|
||||
}
|
||||
if contains && objValue.IsNil() {
|
||||
needDefault = true
|
||||
}
|
||||
|
||||
if needDefault {
|
||||
if defVal, ok := defaultPointerValmap[name]; ok {
|
||||
//set default pointer value
|
||||
// fmt.Printf("%s : set default value %+v\n", name, defVal)
|
||||
if defVal, ok := defaultPointerValMap[name]; ok {
|
||||
// set default pointer value
|
||||
objValue.Set(defVal)
|
||||
} else {
|
||||
return fmt.Errorf("flag %s default value not provided", name)
|
||||
}
|
||||
}
|
||||
|
||||
if !objValue.IsNil() && contains {
|
||||
if objValue.Type().Elem().Kind() == reflect.Struct {
|
||||
if err := fillStructRecursive(objValue.Elem(), defaultPointerValmap, valmap, name); err != nil {
|
||||
if err := fillStructRecursive(objValue.Elem(), defaultPointerValMap, valMap, name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -378,35 +341,35 @@ func fillStructRecursive(objValue reflect.Value, defaultPointerValmap map[string
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetFields sets value to fieldValue using tag as key in valmap
|
||||
func setFields(fieldValue reflect.Value, val Parser) error {
|
||||
// SetFields sets value to fieldValue using tag as key in valMap
|
||||
func setFields(fieldValue reflect.Value, val parse.Parser) error {
|
||||
if fieldValue.CanSet() {
|
||||
fieldValue.Set(reflect.ValueOf(val).Elem().Convert(fieldValue.Type()))
|
||||
} else {
|
||||
return errors.New(fieldValue.Type().String() + " is not settable.")
|
||||
return fmt.Errorf("%s is not settable", fieldValue.Type().String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//PrintHelp generates and prints command line help
|
||||
func PrintHelp(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser) error {
|
||||
return PrintHelpWithCommand(flagmap, defaultValmap, parsers, nil, nil)
|
||||
// PrintHelp generates and prints command line help
|
||||
func PrintHelp(flagMap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser) error {
|
||||
return PrintHelpWithCommand(flagMap, defaultValmap, parsers, nil, nil)
|
||||
}
|
||||
|
||||
//PrintError takes a not nil error and prints command line help
|
||||
func PrintError(err error, flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser) error {
|
||||
// PrintError takes a not nil error and prints command line help
|
||||
func PrintError(err error, flagMap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser) error {
|
||||
if err != flag.ErrHelp {
|
||||
fmt.Printf("Error : %s\n", err)
|
||||
fmt.Printf("Error: %s\n", err)
|
||||
}
|
||||
if !strings.Contains(err.Error(), ":No parser for type") {
|
||||
PrintHelp(flagmap, defaultValmap, parsers)
|
||||
PrintHelp(flagMap, defaultValmap, parsers)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
//LoadWithParsers initializes config : struct fields given by reference, with args : arguments.
|
||||
//Some custom parsers may be given.
|
||||
func LoadWithParsers(config interface{}, defaultValue interface{}, args []string, customParsers map[reflect.Type]Parser) error {
|
||||
// LoadWithParsers initializes config : struct fields given by reference, with args : arguments.
|
||||
// Some custom parsers may be given.
|
||||
func LoadWithParsers(config interface{}, defaultValue interface{}, args []string, customParsers map[reflect.Type]parse.Parser) error {
|
||||
cmd := &Command{
|
||||
Config: config,
|
||||
DefaultPointersConfig: defaultValue,
|
||||
@@ -415,8 +378,8 @@ func LoadWithParsers(config interface{}, defaultValue interface{}, args []string
|
||||
return LoadWithCommand(cmd, args, customParsers, nil)
|
||||
}
|
||||
|
||||
//Load initializes config : struct fields given by reference, with args : arguments.
|
||||
//Some custom parsers may be given.
|
||||
// Load initializes config : struct fields given by reference, with args : arguments.
|
||||
// Some custom parsers may be given.
|
||||
func Load(config interface{}, defaultValue interface{}, args []string) error {
|
||||
return LoadWithParsers(config, defaultValue, args, nil)
|
||||
}
|
||||
@@ -430,35 +393,34 @@ type Command struct {
|
||||
Name string
|
||||
Description string
|
||||
Config interface{}
|
||||
DefaultPointersConfig interface{} //TODO:case DefaultPointersConfig is nil
|
||||
DefaultPointersConfig interface{} // TODO: case DefaultPointersConfig is nil
|
||||
Run func() error
|
||||
Metadata map[string]string
|
||||
}
|
||||
|
||||
//LoadWithCommand initializes config : struct fields given by reference, with args : arguments.
|
||||
//Some custom parsers and some subCommand may be given.
|
||||
func LoadWithCommand(cmd *Command, cmdArgs []string, customParsers map[reflect.Type]Parser, subCommand []*Command) error {
|
||||
|
||||
parsers, err := loadParsers(customParsers)
|
||||
// LoadWithCommand initializes config : struct fields given by reference, with args : arguments.
|
||||
// Some custom parsers and some subCommand may be given.
|
||||
func LoadWithCommand(cmd *Command, cmdArgs []string, customParsers map[reflect.Type]parse.Parser, subCommand []*Command) error {
|
||||
parsers, err := parse.LoadParsers(customParsers)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tagsmap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(cmd.Config), tagsmap, ""); err != nil {
|
||||
tagsMap := make(map[string]reflect.StructField)
|
||||
if err := getTypesRecursive(reflect.ValueOf(cmd.Config), tagsMap, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
defaultValmap := make(map[string]reflect.Value)
|
||||
if err := getDefaultValue(reflect.ValueOf(cmd.Config), reflect.ValueOf(cmd.DefaultPointersConfig), defaultValmap, ""); err != nil {
|
||||
defaultValMap := make(map[string]reflect.Value)
|
||||
if err := getDefaultValue(reflect.ValueOf(cmd.Config), reflect.ValueOf(cmd.DefaultPointersConfig), defaultValMap, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
valmap, errParseArgs := parseArgs(cmdArgs, tagsmap, parsers)
|
||||
valMap, errParseArgs := parseArgs(cmdArgs, tagsMap, parsers)
|
||||
if errParseArgs != nil && errParseArgs != ErrParserNotFound {
|
||||
return PrintErrorWithCommand(errParseArgs, tagsmap, defaultValmap, parsers, cmd, subCommand)
|
||||
return PrintErrorWithCommand(errParseArgs, tagsMap, defaultValMap, parsers, cmd, subCommand)
|
||||
}
|
||||
|
||||
if err := fillStructRecursive(reflect.ValueOf(cmd.Config), defaultValmap, valmap, ""); err != nil {
|
||||
if err := fillStructRecursive(reflect.ValueOf(cmd.Config), defaultValMap, valMap, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -469,8 +431,8 @@ func LoadWithCommand(cmd *Command, cmdArgs []string, customParsers map[reflect.T
|
||||
return nil
|
||||
}
|
||||
|
||||
//PrintHelpWithCommand generates and prints command line help for a Command
|
||||
func PrintHelpWithCommand(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, cmd *Command, subCmd []*Command) error {
|
||||
// PrintHelpWithCommand generates and prints command line help for a Command
|
||||
func PrintHelpWithCommand(flagMap map[string]reflect.StructField, defaultValMap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser, cmd *Command, subCmd []*Command) error {
|
||||
// Define a templates
|
||||
// Using POSXE STD : http://pubs.opengroup.org/onlinepubs/9699919799/
|
||||
const helper = `{{if .ProgDescription}}{{.ProgDescription}}
|
||||
@@ -504,7 +466,7 @@ Flags:
|
||||
_, tempStruct.ProgName = path.Split(os.Args[0])
|
||||
}
|
||||
|
||||
//Run Template
|
||||
// Run Template
|
||||
tmplHelper, err := template.New("helper").Parse(helper)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -514,38 +476,38 @@ Flags:
|
||||
return err
|
||||
}
|
||||
|
||||
return printFlagsDescriptionsDefaultValues(flagmap, defaultValmap, parsers, os.Stdout)
|
||||
return printFlagsDescriptionsDefaultValues(flagMap, defaultValMap, parsers, os.Stdout)
|
||||
}
|
||||
|
||||
func printFlagsDescriptionsDefaultValues(flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, output io.Writer) error {
|
||||
func printFlagsDescriptionsDefaultValues(flagMap map[string]reflect.StructField, defaultValMap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser, output io.Writer) error {
|
||||
// Sort alphabetically & Delete unparsable flags in a slice
|
||||
flags := []string{}
|
||||
for flag, field := range flagmap {
|
||||
var flags []string
|
||||
for flg, field := range flagMap {
|
||||
if _, ok := parsers[field.Type]; ok {
|
||||
flags = append(flags, flag)
|
||||
flags = append(flags, flg)
|
||||
}
|
||||
}
|
||||
sort.Strings(flags)
|
||||
|
||||
// Process data
|
||||
descriptions := []string{}
|
||||
defaultValues := []string{}
|
||||
flagsWithDashs := []string{}
|
||||
shortFlagsWithDash := []string{}
|
||||
for _, flag := range flags {
|
||||
field := flagmap[flag]
|
||||
var descriptions []string
|
||||
var defaultValues []string
|
||||
var flagsWithDash []string
|
||||
var shortFlagsWithDash []string
|
||||
for _, flg := range flags {
|
||||
field := flagMap[flg]
|
||||
if short := field.Tag.Get("short"); len(short) == 1 {
|
||||
shortFlagsWithDash = append(shortFlagsWithDash, "-"+short+",")
|
||||
} else {
|
||||
shortFlagsWithDash = append(shortFlagsWithDash, "")
|
||||
}
|
||||
flagsWithDashs = append(flagsWithDashs, "--"+flag)
|
||||
flagsWithDash = append(flagsWithDash, "--"+flg)
|
||||
|
||||
//flag on pointer ?
|
||||
if defVal, ok := defaultValmap[flag]; ok {
|
||||
// flag on pointer ?
|
||||
if defVal, ok := defaultValMap[flg]; ok {
|
||||
if defVal.Kind() != reflect.Ptr {
|
||||
// Set defaultValue on parsers
|
||||
parsers[field.Type].SetValue(defaultValmap[flag].Interface())
|
||||
parsers[field.Type].SetValue(defaultValMap[flg].Interface())
|
||||
}
|
||||
|
||||
if defVal := parsers[field.Type].String(); len(defVal) > 0 {
|
||||
@@ -560,17 +522,19 @@ func printFlagsDescriptionsDefaultValues(flagmap map[string]reflect.StructField,
|
||||
descriptions = append(descriptions, description)
|
||||
if i != 0 {
|
||||
defaultValues = append(defaultValues, "")
|
||||
flagsWithDashs = append(flagsWithDashs, "")
|
||||
flagsWithDash = append(flagsWithDash, "")
|
||||
shortFlagsWithDash = append(shortFlagsWithDash, "")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//add help flag
|
||||
shortFlagsWithDash = append(shortFlagsWithDash, "-h,")
|
||||
flagsWithDashs = append(flagsWithDashs, "--help")
|
||||
flagsWithDash = append(flagsWithDash, "--help")
|
||||
descriptions = append(descriptions, "Print Help (this message) and exit")
|
||||
defaultValues = append(defaultValues, "")
|
||||
return displayTab(output, shortFlagsWithDash, flagsWithDashs, descriptions, defaultValues)
|
||||
|
||||
return displayTab(output, shortFlagsWithDash, flagsWithDash, descriptions, defaultValues)
|
||||
}
|
||||
func split(str string, width int) []string {
|
||||
if len(str) > width {
|
||||
@@ -578,16 +542,19 @@ func split(str string, width int) []string {
|
||||
if index == -1 {
|
||||
index = width
|
||||
}
|
||||
|
||||
return append([]string{strings.TrimSpace(str[:index])}, split(strings.TrimSpace(str[index:]), width)...)
|
||||
}
|
||||
return []string{str}
|
||||
}
|
||||
|
||||
func displayTab(output io.Writer, columns ...[]string) error {
|
||||
nbRow := len(columns[0])
|
||||
nbCol := len(columns)
|
||||
w := new(tabwriter.Writer)
|
||||
w.Init(output, 0, 4, 1, ' ', 0)
|
||||
|
||||
nbRow := len(columns[0])
|
||||
nbCol := len(columns)
|
||||
|
||||
for i := 0; i < nbRow; i++ {
|
||||
row := ""
|
||||
for j, col := range columns {
|
||||
@@ -598,56 +565,58 @@ func displayTab(output io.Writer, columns ...[]string) error {
|
||||
}
|
||||
fmt.Fprintln(w, row)
|
||||
}
|
||||
w.Flush()
|
||||
return nil
|
||||
|
||||
return w.Flush()
|
||||
}
|
||||
|
||||
//PrintErrorWithCommand takes a not nil error and prints command line help
|
||||
func PrintErrorWithCommand(err error, flagmap map[string]reflect.StructField, defaultValmap map[string]reflect.Value, parsers map[reflect.Type]Parser, cmd *Command, subCmd []*Command) error {
|
||||
// PrintErrorWithCommand takes a not nil error and prints command line help
|
||||
func PrintErrorWithCommand(err error, flagMap map[string]reflect.StructField, defaultValMap map[string]reflect.Value, parsers map[reflect.Type]parse.Parser, cmd *Command, subCmd []*Command) error {
|
||||
if err != flag.ErrHelp {
|
||||
fmt.Printf("Error here : %s\n", err)
|
||||
}
|
||||
PrintHelpWithCommand(flagmap, defaultValmap, parsers, cmd, subCmd)
|
||||
|
||||
PrintHelpWithCommand(flagMap, defaultValMap, parsers, cmd, subCmd)
|
||||
return err
|
||||
}
|
||||
|
||||
//Flaeg struct contains commands (at least the root one)
|
||||
//and row arguments (command and/or flags)
|
||||
//a map of custom parsers could be use
|
||||
// Flaeg struct contains commands (at least the root one)
|
||||
// and row arguments (command and/or flags)
|
||||
// a map of custom parsers could be use
|
||||
type Flaeg struct {
|
||||
calledCommand *Command
|
||||
commands []*Command ///rootCommand is th fist one in this slice
|
||||
args []string
|
||||
commmandArgs []string
|
||||
customParsers map[reflect.Type]Parser
|
||||
commandArgs []string
|
||||
customParsers map[reflect.Type]parse.Parser
|
||||
}
|
||||
|
||||
//New creats and initialize a pointer on Flaeg
|
||||
// New creates and initialize a pointer on Flaeg
|
||||
func New(rootCommand *Command, args []string) *Flaeg {
|
||||
var f Flaeg
|
||||
f.commands = []*Command{rootCommand}
|
||||
f.args = args
|
||||
f.customParsers = map[reflect.Type]Parser{}
|
||||
f.customParsers = map[reflect.Type]parse.Parser{}
|
||||
return &f
|
||||
}
|
||||
|
||||
//AddCommand adds sub-command to the root command
|
||||
// AddCommand adds sub-command to the root command
|
||||
func (f *Flaeg) AddCommand(command *Command) {
|
||||
f.commands = append(f.commands, command)
|
||||
}
|
||||
|
||||
//AddParser adds custom parser for a type to the map of custom parsers
|
||||
func (f *Flaeg) AddParser(typ reflect.Type, parser Parser) {
|
||||
// AddParser adds custom parser for a type to the map of custom parsers
|
||||
func (f *Flaeg) AddParser(typ reflect.Type, parser parse.Parser) {
|
||||
f.customParsers[typ] = parser
|
||||
}
|
||||
|
||||
// Run calls the command with flags given as agruments
|
||||
// Run calls the command with flags given as arguments
|
||||
func (f *Flaeg) Run() error {
|
||||
if f.calledCommand == nil {
|
||||
if _, _, err := f.findCommandWithCommandArgs(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := f.Parse(f.calledCommand); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -658,15 +627,16 @@ func (f *Flaeg) Run() error {
|
||||
// It returns nil and a not nil error if it fails
|
||||
func (f *Flaeg) Parse(cmd *Command) (*Command, error) {
|
||||
if f.calledCommand == nil {
|
||||
f.commmandArgs = f.args
|
||||
f.commandArgs = f.args
|
||||
}
|
||||
if err := LoadWithCommand(cmd, f.commmandArgs, f.customParsers, f.commands); err != nil {
|
||||
|
||||
if err := LoadWithCommand(cmd, f.commandArgs, f.customParsers, f.commands); err != nil {
|
||||
return cmd, err
|
||||
}
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
//splitArgs takes args (type []string) and return command ("" if rootCommand) and command's args
|
||||
// splitArgs takes args (type []string) and return command ("" if rootCommand) and command's args
|
||||
func splitArgs(args []string) (string, []string) {
|
||||
if len(args) >= 1 && len(args[0]) >= 1 && string(args[0][0]) != "-" {
|
||||
if len(args) == 1 {
|
||||
@@ -680,20 +650,20 @@ func splitArgs(args []string) (string, []string) {
|
||||
// findCommandWithCommandArgs returns the called command (by reference) and command's args
|
||||
// the error returned is not nil if it fails
|
||||
func (f *Flaeg) findCommandWithCommandArgs() (*Command, []string, error) {
|
||||
commandName := ""
|
||||
commandName, f.commmandArgs = splitArgs(f.args)
|
||||
var commandName string
|
||||
commandName, f.commandArgs = splitArgs(f.args)
|
||||
if len(commandName) > 0 {
|
||||
for _, command := range f.commands {
|
||||
if commandName == command.Name {
|
||||
f.calledCommand = command
|
||||
return f.calledCommand, f.commmandArgs, nil
|
||||
return f.calledCommand, f.commandArgs, nil
|
||||
}
|
||||
}
|
||||
return nil, []string{}, fmt.Errorf("Command %s not found", commandName)
|
||||
return nil, []string{}, fmt.Errorf("command %s not found", commandName)
|
||||
}
|
||||
|
||||
f.calledCommand = f.commands[0]
|
||||
return f.calledCommand, f.commmandArgs, nil
|
||||
return f.calledCommand, f.commandArgs, nil
|
||||
}
|
||||
|
||||
// GetCommand splits args and returns the called command (by reference)
|
||||
@@ -706,15 +676,17 @@ func (f *Flaeg) GetCommand() (*Command, error) {
|
||||
return f.calledCommand, nil
|
||||
}
|
||||
|
||||
//isExported return true is the field (from fieldName) is exported,
|
||||
//else false
|
||||
// isExported return true is the field (from fieldName) is exported,
|
||||
// else false
|
||||
func isExported(fieldName string) bool {
|
||||
if len(fieldName) < 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
if string(fieldName[0]) == strings.ToUpper(string(fieldName[0])) {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -722,22 +694,24 @@ func argToLower(inArg string) string {
|
||||
if len(inArg) < 2 {
|
||||
return strings.ToLower(inArg)
|
||||
}
|
||||
|
||||
var outArg string
|
||||
dashIndex := strings.Index(inArg, "--")
|
||||
if dashIndex == -1 {
|
||||
if dashIndex = strings.Index(inArg, "-"); dashIndex == -1 {
|
||||
return inArg
|
||||
}
|
||||
//-fValue
|
||||
// -fValue
|
||||
outArg = strings.ToLower(inArg[dashIndex:dashIndex+2]) + inArg[dashIndex+2:]
|
||||
return outArg
|
||||
}
|
||||
//--flag
|
||||
|
||||
// --flag
|
||||
if equalIndex := strings.Index(inArg, "="); equalIndex != -1 {
|
||||
//--flag=value
|
||||
// --flag=value
|
||||
outArg = strings.ToLower(inArg[dashIndex:equalIndex]) + inArg[equalIndex:]
|
||||
} else {
|
||||
//--boolflag
|
||||
// --boolflag
|
||||
outArg = strings.ToLower(inArg[dashIndex:])
|
||||
}
|
||||
|
||||
@@ -745,7 +719,7 @@ func argToLower(inArg string) string {
|
||||
}
|
||||
|
||||
func argsToLower(inArgs []string) []string {
|
||||
outArgs := make([]string, len(inArgs), len(inArgs))
|
||||
outArgs := make([]string, len(inArgs))
|
||||
for i, inArg := range inArgs {
|
||||
outArgs[i] = argToLower(inArg)
|
||||
}
|
||||
|
||||
7
vendor/github.com/containous/flaeg/flaeg_types.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
package flaeg

import "github.com/containous/flaeg/parse"

// Duration is deprecated use parse.Duration instead
// Deprecated
type Duration = parse.Duration
301
vendor/github.com/containous/flaeg/parse/parse.go
generated
vendored
Normal file
@@ -0,0 +1,301 @@
|
||||
package parse
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Parser is an interface that allows the contents of a flag.Getter to be set.
|
||||
type Parser interface {
|
||||
flag.Getter
|
||||
SetValue(interface{})
|
||||
}
|
||||
|
||||
// BoolValue bool Value type
|
||||
type BoolValue bool
|
||||
|
||||
// Set sets bool value from the given string value.
|
||||
func (b *BoolValue) Set(s string) error {
|
||||
v, err := strconv.ParseBool(s)
|
||||
*b = BoolValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get returns the bool value.
|
||||
func (b *BoolValue) Get() interface{} { return bool(*b) }
|
||||
|
||||
func (b *BoolValue) String() string { return fmt.Sprintf("%v", *b) }
|
||||
|
||||
// IsBoolFlag return true
|
||||
func (b *BoolValue) IsBoolFlag() bool { return true }
|
||||
|
||||
// SetValue sets the duration from the given bool-asserted value.
|
||||
func (b *BoolValue) SetValue(val interface{}) {
|
||||
*b = BoolValue(val.(bool))
|
||||
}
|
||||
|
||||
// BoolFlag optional interface to indicate boolean flags that can be
|
||||
// supplied without "=value" text
|
||||
type BoolFlag interface {
|
||||
flag.Value
|
||||
IsBoolFlag() bool
|
||||
}
|
||||
|
||||
// IntValue int Value
|
||||
type IntValue int
|
||||
|
||||
// Set sets int value from the given string value.
|
||||
func (i *IntValue) Set(s string) error {
|
||||
v, err := strconv.ParseInt(s, 0, 64)
|
||||
*i = IntValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get returns the int value.
|
||||
func (i *IntValue) Get() interface{} { return int(*i) }
|
||||
|
||||
func (i *IntValue) String() string { return fmt.Sprintf("%v", *i) }
|
||||
|
||||
// SetValue sets the IntValue from the given int-asserted value.
func (i *IntValue) SetValue(val interface{}) {
	*i = IntValue(val.(int))
}

// Int64Value int64 Value
type Int64Value int64

// Set sets int64 value from the given string value.
func (i *Int64Value) Set(s string) error {
	v, err := strconv.ParseInt(s, 0, 64)
	*i = Int64Value(v)
	return err
}

// Get returns the int64 value.
func (i *Int64Value) Get() interface{} { return int64(*i) }

func (i *Int64Value) String() string { return fmt.Sprintf("%v", *i) }

// SetValue sets the Int64Value from the given int64-asserted value.
func (i *Int64Value) SetValue(val interface{}) {
	*i = Int64Value(val.(int64))
}

// UintValue uint Value
type UintValue uint

// Set sets uint value from the given string value.
func (i *UintValue) Set(s string) error {
	v, err := strconv.ParseUint(s, 0, 64)
	*i = UintValue(v)
	return err
}

// Get returns the uint value.
func (i *UintValue) Get() interface{} { return uint(*i) }

func (i *UintValue) String() string { return fmt.Sprintf("%v", *i) }

// SetValue sets the UintValue from the given uint-asserted value.
func (i *UintValue) SetValue(val interface{}) {
	*i = UintValue(val.(uint))
}

// Uint64Value uint64 Value
type Uint64Value uint64

// Set sets uint64 value from the given string value.
func (i *Uint64Value) Set(s string) error {
	v, err := strconv.ParseUint(s, 0, 64)
	*i = Uint64Value(v)
	return err
}

// Get returns the uint64 value.
func (i *Uint64Value) Get() interface{} { return uint64(*i) }

func (i *Uint64Value) String() string { return fmt.Sprintf("%v", *i) }

// SetValue sets the Uint64Value from the given uint64-asserted value.
func (i *Uint64Value) SetValue(val interface{}) {
	*i = Uint64Value(val.(uint64))
}

// StringValue string Value
type StringValue string

// Set sets string value from the given string value.
func (s *StringValue) Set(val string) error {
	*s = StringValue(val)
	return nil
}

// Get returns the string value.
func (s *StringValue) Get() interface{} { return string(*s) }

func (s *StringValue) String() string { return string(*s) }

// SetValue sets the StringValue from the given string-asserted value.
func (s *StringValue) SetValue(val interface{}) {
	*s = StringValue(val.(string))
}

// Float64Value float64 Value
type Float64Value float64

// Set sets float64 value from the given string value.
func (f *Float64Value) Set(s string) error {
	v, err := strconv.ParseFloat(s, 64)
	*f = Float64Value(v)
	return err
}

// Get returns the float64 value.
func (f *Float64Value) Get() interface{} { return float64(*f) }

func (f *Float64Value) String() string { return fmt.Sprintf("%v", *f) }

// SetValue sets the Float64Value from the given float64-asserted value.
func (f *Float64Value) SetValue(val interface{}) {
	*f = Float64Value(val.(float64))
}

// Duration is a custom type suitable for parsing duration values.
// It supports `time.ParseDuration`-compatible values and suffix-less digits; in
// the latter case, seconds are assumed.
type Duration time.Duration

// Set sets the duration from the given string value.
func (d *Duration) Set(s string) error {
	if v, err := strconv.Atoi(s); err == nil {
		*d = Duration(time.Duration(v) * time.Second)
		return nil
	}

	v, err := time.ParseDuration(s)
	*d = Duration(v)
	return err
}

// Get returns the duration value.
func (d *Duration) Get() interface{} { return time.Duration(*d) }

// String returns a string representation of the duration value.
func (d *Duration) String() string { return (*time.Duration)(d).String() }

// SetValue sets the duration from the given Duration-asserted value.
func (d *Duration) SetValue(val interface{}) {
	*d = val.(Duration)
}

// MarshalText serializes the duration value into text.
func (d *Duration) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText deserializes the given text into a duration value.
// It is meant to support TOML decoding of durations.
func (d *Duration) UnmarshalText(text []byte) error {
	return d.Set(string(text))
}

// UnmarshalJSON deserializes the given text into a duration value.
func (d *Duration) UnmarshalJSON(text []byte) error {
	if v, err := strconv.Atoi(string(text)); err == nil {
		*d = Duration(time.Duration(v))
		return nil
	}

	v, err := time.ParseDuration(string(text))
	*d = Duration(v)
	return err
}
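Taken together, the suffix-less branch and the `time.ParseDuration` fallback mean that `"90"` and `"1m30s"` parse to the same value. A small standalone sketch of that behavior (illustrative only, reusing the Set logic shown above; it is not part of the vendored file):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// Duration mirrors the flaeg type above: bare integers are read as seconds,
// anything else is handed to time.ParseDuration.
type Duration time.Duration

// Set parses s into the duration, assuming seconds when no unit suffix is given.
func (d *Duration) Set(s string) error {
	if v, err := strconv.Atoi(s); err == nil {
		*d = Duration(time.Duration(v) * time.Second)
		return nil
	}
	v, err := time.ParseDuration(s)
	*d = Duration(v)
	return err
}

func main() {
	var d Duration
	_ = d.Set("90")               // suffix-less digits: treated as 90 seconds
	fmt.Println(time.Duration(d)) // 1m30s
	_ = d.Set("1m30s")            // time.ParseDuration syntax
	fmt.Println(time.Duration(d)) // 1m30s
}
```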
// TimeValue time.Time Value
type TimeValue time.Time

// Set sets time.Time value from the given string value.
func (t *TimeValue) Set(s string) error {
	v, err := time.Parse(time.RFC3339, s)
	*t = TimeValue(v)
	return err
}

// Get returns the time.Time value.
func (t *TimeValue) Get() interface{} { return time.Time(*t) }

func (t *TimeValue) String() string { return (*time.Time)(t).String() }

// SetValue sets the TimeValue from the given time.Time-asserted value.
func (t *TimeValue) SetValue(val interface{}) {
	*t = TimeValue(val.(time.Time))
}

// SliceStrings parses a slice of strings.
type SliceStrings []string

// Set adds string elements into the parser.
// It splits str on , and ;
func (s *SliceStrings) Set(str string) error {
	fargs := func(c rune) bool {
		return c == ',' || c == ';'
	}
	// get function
	slice := strings.FieldsFunc(str, fargs)
	*s = append(*s, slice...)
	return nil
}

// Get returns the []string.
func (s *SliceStrings) Get() interface{} { return []string(*s) }

// String returns the slice as a string.
func (s *SliceStrings) String() string { return fmt.Sprintf("%v", *s) }

// SetValue sets []string into the parser.
func (s *SliceStrings) SetValue(val interface{}) {
	*s = SliceStrings(val.([]string))
}
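Because `Set` uses `strings.FieldsFunc` with both `,` and `;` as separators, the two can be mixed freely in a single flag value and empty fields are dropped. A quick illustrative check (not part of the vendored file):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same separator function as SliceStrings.Set above.
	fargs := func(c rune) bool { return c == ',' || c == ';' }
	fmt.Println(strings.FieldsFunc("web,api;;metrics", fargs)) // [web api metrics]
}
```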
// LoadParsers loads default parsers and custom parsers given as a parameter.
// It returns a map from reflect.Type to Parser covering bool, int, int64,
// uint, uint64, string, float64, Duration and time.Time, plus any custom parsers.
func LoadParsers(customParsers map[reflect.Type]Parser) (map[reflect.Type]Parser, error) {
	parsers := map[reflect.Type]Parser{}

	var boolParser BoolValue
	parsers[reflect.TypeOf(true)] = &boolParser

	var intParser IntValue
	parsers[reflect.TypeOf(1)] = &intParser

	var int64Parser Int64Value
	parsers[reflect.TypeOf(int64(1))] = &int64Parser

	var uintParser UintValue
	parsers[reflect.TypeOf(uint(1))] = &uintParser

	var uint64Parser Uint64Value
	parsers[reflect.TypeOf(uint64(1))] = &uint64Parser

	var stringParser StringValue
	parsers[reflect.TypeOf("")] = &stringParser

	var float64Parser Float64Value
	parsers[reflect.TypeOf(float64(1.5))] = &float64Parser

	var durationParser Duration
	parsers[reflect.TypeOf(Duration(time.Second))] = &durationParser

	var timeParser TimeValue
	parsers[reflect.TypeOf(time.Now())] = &timeParser

	for rType, parser := range customParsers {
		parsers[rType] = parser
	}
	return parsers, nil
}
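Custom types are registered by implementing the same Parser contract (flag.Getter plus SetValue) and passing them in through customParsers, where they can also override a default. A minimal sketch in the style of the values above, written as if it lived in the same package; the HostPort type is invented purely for illustration and is not part of the vendored code:

```go
// HostPort is a hypothetical "host:port" flag value used to illustrate the Parser contract.
type HostPort string

// Set stores the raw string; a real implementation might validate the "host:port" form.
func (h *HostPort) Set(s string) error { *h = HostPort(s); return nil }

// Get returns the underlying string.
func (h *HostPort) Get() interface{} { return string(*h) }

func (h *HostPort) String() string { return string(*h) }

// SetValue sets the HostPort from the given string-asserted value.
func (h *HostPort) SetValue(val interface{}) { *h = HostPort(val.(string)) }

// Registering it next to the defaults (sketch):
//   custom := map[reflect.Type]Parser{reflect.TypeOf(HostPort("")): new(HostPort)}
//   parsers, err := LoadParsers(custom)
```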
221 vendor/github.com/containous/flaeg/parsers.go (generated, vendored)
@@ -1,221 +0,0 @@
|
||||
package flaeg
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
//TODO : add parsers on all types in https://golang.org/pkg/builtin/
|
||||
|
||||
// Parser is an interface that allows the contents of a flag.Getter to be set.
|
||||
type Parser interface {
|
||||
flag.Getter
|
||||
SetValue(interface{})
|
||||
}
|
||||
|
||||
// -- bool Value
|
||||
type boolValue bool
|
||||
|
||||
func (b *boolValue) Set(s string) error {
|
||||
v, err := strconv.ParseBool(s)
|
||||
*b = boolValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *boolValue) Get() interface{} { return bool(*b) }
|
||||
|
||||
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
|
||||
|
||||
func (b *boolValue) IsBoolFlag() bool { return true }
|
||||
|
||||
func (b *boolValue) SetValue(val interface{}) {
|
||||
*b = boolValue(val.(bool))
|
||||
}
|
||||
|
||||
// optional interface to indicate boolean flags that can be
|
||||
// supplied without "=value" text
|
||||
type boolFlag interface {
|
||||
flag.Value
|
||||
IsBoolFlag() bool
|
||||
}
|
||||
|
||||
// -- int Value
|
||||
type intValue int
|
||||
|
||||
func (i *intValue) Set(s string) error {
|
||||
v, err := strconv.ParseInt(s, 0, 64)
|
||||
*i = intValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *intValue) Get() interface{} { return int(*i) }
|
||||
|
||||
func (i *intValue) String() string { return fmt.Sprintf("%v", *i) }
|
||||
|
||||
func (i *intValue) SetValue(val interface{}) {
|
||||
*i = intValue(val.(int))
|
||||
}
|
||||
|
||||
// -- int64 Value
|
||||
type int64Value int64
|
||||
|
||||
func (i *int64Value) Set(s string) error {
|
||||
v, err := strconv.ParseInt(s, 0, 64)
|
||||
*i = int64Value(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *int64Value) Get() interface{} { return int64(*i) }
|
||||
|
||||
func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) }
|
||||
|
||||
func (i *int64Value) SetValue(val interface{}) {
|
||||
*i = int64Value(val.(int64))
|
||||
}
|
||||
|
||||
// -- uint Value
|
||||
type uintValue uint
|
||||
|
||||
func (i *uintValue) Set(s string) error {
|
||||
v, err := strconv.ParseUint(s, 0, 64)
|
||||
*i = uintValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *uintValue) Get() interface{} { return uint(*i) }
|
||||
|
||||
func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) }
|
||||
|
||||
func (i *uintValue) SetValue(val interface{}) {
|
||||
*i = uintValue(val.(uint))
|
||||
}
|
||||
|
||||
// -- uint64 Value
|
||||
type uint64Value uint64
|
||||
|
||||
func (i *uint64Value) Set(s string) error {
|
||||
v, err := strconv.ParseUint(s, 0, 64)
|
||||
*i = uint64Value(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (i *uint64Value) Get() interface{} { return uint64(*i) }
|
||||
|
||||
func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) }
|
||||
|
||||
func (i *uint64Value) SetValue(val interface{}) {
|
||||
*i = uint64Value(val.(uint64))
|
||||
}
|
||||
|
||||
// -- string Value
|
||||
type stringValue string
|
||||
|
||||
func (s *stringValue) Set(val string) error {
|
||||
*s = stringValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringValue) Get() interface{} { return string(*s) }
|
||||
|
||||
func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) }
|
||||
|
||||
func (s *stringValue) SetValue(val interface{}) {
|
||||
*s = stringValue(val.(string))
|
||||
}
|
||||
|
||||
// -- float64 Value
|
||||
type float64Value float64
|
||||
|
||||
func (f *float64Value) Set(s string) error {
|
||||
v, err := strconv.ParseFloat(s, 64)
|
||||
*f = float64Value(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *float64Value) Get() interface{} { return float64(*f) }
|
||||
|
||||
func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
|
||||
|
||||
func (f *float64Value) SetValue(val interface{}) {
|
||||
*f = float64Value(val.(float64))
|
||||
}
|
||||
|
||||
// Duration is a custom type suitable for parsing duration values.
|
||||
// It supports `time.ParseDuration`-compatible values and suffix-less digits; in
|
||||
// the latter case, seconds are assumed.
|
||||
type Duration time.Duration
|
||||
|
||||
// Set sets the duration from the given string value.
|
||||
func (d *Duration) Set(s string) error {
|
||||
if v, err := strconv.Atoi(s); err == nil {
|
||||
*d = Duration(time.Duration(v) * time.Second)
|
||||
return nil
|
||||
}
|
||||
|
||||
v, err := time.ParseDuration(s)
|
||||
*d = Duration(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get returns the duration value.
|
||||
func (d *Duration) Get() interface{} { return time.Duration(*d) }
|
||||
|
||||
// String returns a string representation of the duration value.
|
||||
func (d *Duration) String() string { return (*time.Duration)(d).String() }
|
||||
|
||||
// SetValue sets the duration from the given Duration-asserted value.
|
||||
func (d *Duration) SetValue(val interface{}) {
|
||||
*d = Duration(val.(Duration))
|
||||
}
|
||||
|
||||
// UnmarshalText deserializes the given text into a duration value.
|
||||
// It is meant to support TOML decoding of durations.
|
||||
func (d *Duration) UnmarshalText(text []byte) error {
|
||||
return d.Set(string(text))
|
||||
}
|
||||
|
||||
// -- time.Time Value
|
||||
type timeValue time.Time
|
||||
|
||||
func (t *timeValue) Set(s string) error {
|
||||
v, err := time.Parse(time.RFC3339, s)
|
||||
*t = timeValue(v)
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *timeValue) Get() interface{} { return time.Time(*t) }
|
||||
|
||||
func (t *timeValue) String() string { return (*time.Time)(t).String() }
|
||||
|
||||
func (t *timeValue) SetValue(val interface{}) {
|
||||
*t = timeValue(val.(time.Time))
|
||||
}
|
||||
|
||||
//SliceStrings parse slice of strings
|
||||
type SliceStrings []string
|
||||
|
||||
//Set adds strings elem into the the parser
|
||||
//it splits str on , and ;
|
||||
func (s *SliceStrings) Set(str string) error {
|
||||
fargs := func(c rune) bool {
|
||||
return c == ',' || c == ';'
|
||||
}
|
||||
// get function
|
||||
slice := strings.FieldsFunc(str, fargs)
|
||||
*s = append(*s, slice...)
|
||||
return nil
|
||||
}
|
||||
|
||||
//Get []string
|
||||
func (s *SliceStrings) Get() interface{} { return []string(*s) }
|
||||
|
||||
//String return slice in a string
|
||||
func (s *SliceStrings) String() string { return fmt.Sprintf("%v", *s) }
|
||||
|
||||
//SetValue sets []string into the parser
|
||||
func (s *SliceStrings) SetValue(val interface{}) {
|
||||
*s = SliceStrings(val.([]string))
|
||||
}
47 vendor/github.com/containous/staert/kv.go (generated, vendored)
@@ -1,10 +1,14 @@
package staert

import (
	"bytes"
	"compress/gzip"
	"encoding"
	"encoding/base64"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"sort"
	"strconv"
@@ -155,16 +159,32 @@ func decodeHook(fromType reflect.Type, toType reflect.Type, data interface{}) (i

		return dataOutput, nil
	} else if fromType.Kind() == reflect.String {
		b, err := base64.StdEncoding.DecodeString(data.(string))
		if err != nil {
			return nil, err
		}
		return b, nil
		return readCompressedData(data.(string), gzipReader, base64Reader)
	}
	}
	return data, nil
}

func readCompressedData(data string, fs ...func(io.Reader) (io.Reader, error)) ([]byte, error) {
	var err error
	for _, f := range fs {
		var reader io.Reader
		reader, err = f(bytes.NewBufferString(data))
		if err == nil {
			return ioutil.ReadAll(reader)
		}
	}
	return nil, err
}

func base64Reader(r io.Reader) (io.Reader, error) {
	return base64.NewDecoder(base64.StdEncoding, r), nil
}

func gzipReader(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

// StoreConfig stores the config into the KV Store
func (kv *KvSource) StoreConfig(config interface{}) error {
	kvMap := map[string]string{}
@@ -263,7 +283,11 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
	case reflect.Array, reflect.Slice:
		// Byte slices get special treatment
		if objValue.Type().Elem().Kind() == reflect.Uint8 {
			kv[name] = base64.StdEncoding.EncodeToString(objValue.Bytes())
			compressedData, err := writeCompressedData(objValue.Bytes())
			if err != nil {
				return err
			}
			kv[name] = compressedData
		} else {
			for i := 0; i < objValue.Len(); i++ {
				name = key + "/" + strconv.Itoa(i)
@@ -286,6 +310,17 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
	return nil
}

func writeCompressedData(data []byte) (string, error) {
	var buffer bytes.Buffer
	gzipWriter := gzip.NewWriter(&buffer)
	_, err := gzipWriter.Write(data)
	if err != nil {
		return "", err
	}
	gzipWriter.Close()
	return buffer.String(), nil
}

// ListRecursive lists all key value children under key
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
	pairsN1, err := kv.List(key, nil)
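In short, byte slices are now written gzip-compressed by writeCompressedData, and on the way back decodeHook tries a gzip reader before falling back to a plain base64 decoder, so values stored before this change stay readable. A standalone round-trip sketch using only the standard library (illustrative; it is not the vendored staert code):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
)

func main() {
	// Write side: gzip the raw bytes, as writeCompressedData does in the diff above.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("certificate bytes")); err != nil {
		panic(err)
	}
	zw.Close()

	// Read side: try gzip first; an older, plain base64 value would fail here and
	// fall back to the base64 decoder in readCompressedData.
	zr, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	plain, _ := ioutil.ReadAll(zr)
	fmt.Println(string(plain)) // certificate bytes
}
```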
16 vendor/github.com/coreos/etcd/auth/doc.go (generated, vendored)
@@ -1,16 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package auth provides client role authentication for accessing keys in etcd.
|
||||
package auth
137 vendor/github.com/coreos/etcd/auth/jwt.go (generated, vendored)
@@ -1,137 +0,0 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"io/ioutil"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type tokenJWT struct {
|
||||
signMethod string
|
||||
signKey *rsa.PrivateKey
|
||||
verifyKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
func (t *tokenJWT) enable() {}
|
||||
func (t *tokenJWT) disable() {}
|
||||
func (t *tokenJWT) invalidateUser(string) {}
|
||||
func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
|
||||
|
||||
func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
|
||||
// rev isn't used in JWT, it is only used in simple token
|
||||
var (
|
||||
username string
|
||||
revision uint64
|
||||
)
|
||||
|
||||
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
|
||||
return t.verifyKey, nil
|
||||
})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
if !parsed.Valid {
|
||||
plog.Warningf("invalid jwt token: %s", token)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
claims := parsed.Claims.(jwt.MapClaims)
|
||||
|
||||
username = claims["username"].(string)
|
||||
revision = uint64(claims["revision"].(float64))
|
||||
default:
|
||||
plog.Warningf("failed to parse jwt token: %s", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &AuthInfo{Username: username, Revision: revision}, true
|
||||
}
|
||||
|
||||
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
|
||||
// Future work: let a jwt token include permission information would be useful for
|
||||
// permission checking in proxy side.
|
||||
tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
|
||||
jwt.MapClaims{
|
||||
"username": username,
|
||||
"revision": revision,
|
||||
})
|
||||
|
||||
token, err := tk.SignedString(t.signKey)
|
||||
if err != nil {
|
||||
plog.Debugf("failed to sign jwt token: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
plog.Debugf("jwt token: %s", token)
|
||||
|
||||
return token, err
|
||||
}
|
||||
|
||||
func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
|
||||
for k, v := range opts {
|
||||
switch k {
|
||||
case "sign-method":
|
||||
jwtSignMethod = v
|
||||
case "pub-key":
|
||||
jwtPubKeyPath = v
|
||||
case "priv-key":
|
||||
jwtPrivKeyPath = v
|
||||
default:
|
||||
plog.Errorf("unknown token specific option: %s", k)
|
||||
return "", "", "", ErrInvalidAuthOpts
|
||||
}
|
||||
}
|
||||
|
||||
return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
|
||||
}
|
||||
|
||||
func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
|
||||
jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
t := &tokenJWT{}
|
||||
|
||||
t.signMethod = jwtSignMethod
|
||||
|
||||
verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
133 vendor/github.com/coreos/etcd/auth/range_perm_cache.go (generated, vendored)
@@ -1,133 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/pkg/adt"
|
||||
)
|
||||
|
||||
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
|
||||
user := getUser(tx, userName)
|
||||
if user == nil {
|
||||
plog.Errorf("invalid user name %s", userName)
|
||||
return nil
|
||||
}
|
||||
|
||||
readPerms := &adt.IntervalTree{}
|
||||
writePerms := &adt.IntervalTree{}
|
||||
|
||||
for _, roleName := range user.Roles {
|
||||
role := getRole(tx, roleName)
|
||||
if role == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, perm := range role.KeyPermission {
|
||||
var ivl adt.Interval
|
||||
var rangeEnd []byte
|
||||
|
||||
if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
|
||||
rangeEnd = perm.RangeEnd
|
||||
}
|
||||
|
||||
if len(perm.RangeEnd) != 0 {
|
||||
ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
|
||||
} else {
|
||||
ivl = adt.NewBytesAffinePoint(perm.Key)
|
||||
}
|
||||
|
||||
switch perm.PermType {
|
||||
case authpb.READWRITE:
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.READ:
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.WRITE:
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &unifiedRangePermissions{
|
||||
readPerms: readPerms,
|
||||
writePerms: writePerms,
|
||||
}
|
||||
}
|
||||
|
||||
func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
|
||||
rangeEnd = nil
|
||||
}
|
||||
|
||||
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
return cachedPerms.readPerms.Contains(ivl)
|
||||
case authpb.WRITE:
|
||||
return cachedPerms.writePerms.Contains(ivl)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
|
||||
pt := adt.NewBytesAffinePoint(key)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
return cachedPerms.readPerms.Intersects(pt)
|
||||
case authpb.WRITE:
|
||||
return cachedPerms.writePerms.Intersects(pt)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
// assumption: tx is Lock()ed
|
||||
_, ok := as.rangePermCache[userName]
|
||||
if !ok {
|
||||
perms := getMergedPerms(tx, userName)
|
||||
if perms == nil {
|
||||
plog.Errorf("failed to create a unified permission of user %s", userName)
|
||||
return false
|
||||
}
|
||||
as.rangePermCache[userName] = perms
|
||||
}
|
||||
|
||||
if len(rangeEnd) == 0 {
|
||||
return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
|
||||
}
|
||||
|
||||
return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
|
||||
}
|
||||
|
||||
func (as *authStore) clearCachedPerm() {
|
||||
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
||||
}
|
||||
|
||||
func (as *authStore) invalidateCachedPerm(userName string) {
|
||||
delete(as.rangePermCache, userName)
|
||||
}
|
||||
|
||||
type unifiedRangePermissions struct {
|
||||
readPerms *adt.IntervalTree
|
||||
writePerms *adt.IntervalTree
|
||||
}
220 vendor/github.com/coreos/etcd/auth/simple_token.go (generated, vendored)
@@ -1,220 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
// CAUTION: This random number based token mechanism is only for testing purposes.
|
||||
// JWT based mechanism will be added in the near future.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
defaultSimpleTokenLength = 16
|
||||
)
|
||||
|
||||
// var for testing purposes
|
||||
var (
|
||||
simpleTokenTTL = 5 * time.Minute
|
||||
simpleTokenTTLResolution = 1 * time.Second
|
||||
)
|
||||
|
||||
type simpleTokenTTLKeeper struct {
|
||||
tokens map[string]time.Time
|
||||
donec chan struct{}
|
||||
stopc chan struct{}
|
||||
deleteTokenFunc func(string)
|
||||
mu *sync.Mutex
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) stop() {
|
||||
select {
|
||||
case tm.stopc <- struct{}{}:
|
||||
case <-tm.donec:
|
||||
}
|
||||
<-tm.donec
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
|
||||
if _, ok := tm.tokens[token]; ok {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
|
||||
delete(tm.tokens, token)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) run() {
|
||||
tokenTicker := time.NewTicker(simpleTokenTTLResolution)
|
||||
defer func() {
|
||||
tokenTicker.Stop()
|
||||
close(tm.donec)
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-tokenTicker.C:
|
||||
nowtime := time.Now()
|
||||
tm.mu.Lock()
|
||||
for t, tokenendtime := range tm.tokens {
|
||||
if nowtime.After(tokenendtime) {
|
||||
tm.deleteTokenFunc(t)
|
||||
delete(tm.tokens, t)
|
||||
}
|
||||
}
|
||||
tm.mu.Unlock()
|
||||
case <-tm.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type tokenSimple struct {
|
||||
indexWaiter func(uint64) <-chan struct{}
|
||||
simpleTokenKeeper *simpleTokenTTLKeeper
|
||||
simpleTokensMu sync.Mutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
}
|
||||
|
||||
func (t *tokenSimple) genTokenPrefix() (string, error) {
|
||||
ret := make([]byte, defaultSimpleTokenLength)
|
||||
|
||||
for i := 0; i < defaultSimpleTokenLength; i++ {
|
||||
bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ret[i] = letters[bInt.Int64()]
|
||||
}
|
||||
|
||||
return string(ret), nil
|
||||
}
|
||||
|
||||
func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
|
||||
t.simpleTokensMu.Lock()
|
||||
_, ok := t.simpleTokens[token]
|
||||
if ok {
|
||||
plog.Panicf("token %s is alredy used", token)
|
||||
}
|
||||
|
||||
t.simpleTokens[token] = username
|
||||
t.simpleTokenKeeper.addSimpleToken(token)
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) invalidateUser(username string) {
|
||||
if t.simpleTokenKeeper == nil {
|
||||
return
|
||||
}
|
||||
t.simpleTokensMu.Lock()
|
||||
for token, name := range t.simpleTokens {
|
||||
if strings.Compare(name, username) == 0 {
|
||||
delete(t.simpleTokens, token)
|
||||
t.simpleTokenKeeper.deleteSimpleToken(token)
|
||||
}
|
||||
}
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) enable() {
|
||||
delf := func(tk string) {
|
||||
if username, ok := t.simpleTokens[tk]; ok {
|
||||
plog.Infof("deleting token %s for user %s", tk, username)
|
||||
delete(t.simpleTokens, tk)
|
||||
}
|
||||
}
|
||||
t.simpleTokenKeeper = &simpleTokenTTLKeeper{
|
||||
tokens: make(map[string]time.Time),
|
||||
donec: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
deleteTokenFunc: delf,
|
||||
mu: &t.simpleTokensMu,
|
||||
}
|
||||
go t.simpleTokenKeeper.run()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) disable() {
|
||||
t.simpleTokensMu.Lock()
|
||||
tk := t.simpleTokenKeeper
|
||||
t.simpleTokenKeeper = nil
|
||||
t.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||
t.simpleTokensMu.Unlock()
|
||||
if tk != nil {
|
||||
tk.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
|
||||
if !t.isValidSimpleToken(ctx, token) {
|
||||
return nil, false
|
||||
}
|
||||
t.simpleTokensMu.Lock()
|
||||
username, ok := t.simpleTokens[token]
|
||||
if ok && t.simpleTokenKeeper != nil {
|
||||
t.simpleTokenKeeper.resetSimpleToken(token)
|
||||
}
|
||||
t.simpleTokensMu.Unlock()
|
||||
return &AuthInfo{Username: username, Revision: revision}, ok
|
||||
}
|
||||
|
||||
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
|
||||
// rev isn't used in simple token, it is only used in JWT
|
||||
index := ctx.Value("index").(uint64)
|
||||
simpleToken := ctx.Value("simpleToken").(string)
|
||||
token := fmt.Sprintf("%s.%d", simpleToken, index)
|
||||
t.assignSimpleTokenToUser(username, token)
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
|
||||
splitted := strings.Split(token, ".")
|
||||
if len(splitted) != 2 {
|
||||
return false
|
||||
}
|
||||
index, err := strconv.Atoi(splitted[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-t.indexWaiter(uint64(index)):
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
|
||||
return &tokenSimple{
|
||||
simpleTokens: make(map[string]string),
|
||||
indexWaiter: indexWaiter,
|
||||
}
|
||||
}
1059 vendor/github.com/coreos/etcd/auth/store.go (generated, vendored)
File diff suppressed because it is too large
86 vendor/github.com/coreos/etcd/etcdserver/api/capability.go (generated, vendored)
@@ -1,86 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/version"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
type Capability string
|
||||
|
||||
const (
|
||||
AuthCapability Capability = "auth"
|
||||
V3rpcCapability Capability = "v3rpc"
|
||||
)
|
||||
|
||||
var (
|
||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
|
||||
|
||||
// capabilityMaps is a static map of version to capability map.
|
||||
capabilityMaps = map[string]map[Capability]bool{
|
||||
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
"3.1.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
"3.2.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
}
|
||||
|
||||
enableMapMu sync.RWMutex
|
||||
// enabledMap points to a map in capabilityMaps
|
||||
enabledMap map[Capability]bool
|
||||
|
||||
curVersion *semver.Version
|
||||
)
|
||||
|
||||
func init() {
|
||||
enabledMap = map[Capability]bool{
|
||||
AuthCapability: true,
|
||||
V3rpcCapability: true,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateCapability updates the enabledMap when the cluster version increases.
|
||||
func UpdateCapability(v *semver.Version) {
|
||||
if v == nil {
|
||||
// if recovered but version was never set by cluster
|
||||
return
|
||||
}
|
||||
enableMapMu.Lock()
|
||||
if curVersion != nil && !curVersion.LessThan(*v) {
|
||||
enableMapMu.Unlock()
|
||||
return
|
||||
}
|
||||
curVersion = v
|
||||
enabledMap = capabilityMaps[curVersion.String()]
|
||||
enableMapMu.Unlock()
|
||||
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
|
||||
}
|
||||
|
||||
func IsCapabilityEnabled(c Capability) bool {
|
||||
enableMapMu.RLock()
|
||||
defer enableMapMu.RUnlock()
|
||||
if enabledMap == nil {
|
||||
return false
|
||||
}
|
||||
return enabledMap[c]
|
||||
}
|
||||
|
||||
func EnableCapability(c Capability) {
|
||||
enableMapMu.Lock()
|
||||
defer enableMapMu.Unlock()
|
||||
enabledMap[c] = true
|
||||
}
41 vendor/github.com/coreos/etcd/etcdserver/api/cluster.go (generated, vendored)
@@ -1,41 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// Cluster is an interface representing a collection of members in one etcd cluster.
|
||||
type Cluster interface {
|
||||
// ID returns the cluster ID
|
||||
ID() types.ID
|
||||
// ClientURLs returns an aggregate set of all URLs on which this
|
||||
// cluster is listening for client requests
|
||||
ClientURLs() []string
|
||||
// Members returns a slice of members sorted by their ID
|
||||
Members() []*membership.Member
|
||||
// Member retrieves a particular member based on ID, or nil if the
|
||||
// member does not exist in the cluster
|
||||
Member(id types.ID) *membership.Member
|
||||
// IsIDRemoved checks whether the given ID has been removed from this
|
||||
// cluster at some point in the past
|
||||
IsIDRemoved(id types.ID) bool
|
||||
// Version is the cluster-wide minimum major.minor version.
|
||||
Version() *semver.Version
|
||||
}
16 vendor/github.com/coreos/etcd/etcdserver/api/doc.go (generated, vendored)
@@ -1,16 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
|
||||
package api
157 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go (generated, vendored)
@@ -1,157 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type AuthServer struct {
|
||||
authenticator etcdserver.Authenticator
|
||||
}
|
||||
|
||||
func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
|
||||
return &AuthServer{authenticator: s}
|
||||
}
|
||||
|
||||
func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
|
||||
resp, err := as.authenticator.AuthEnable(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
|
||||
resp, err := as.authenticator.AuthDisable(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
|
||||
resp, err := as.authenticator.Authenticate(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
|
||||
resp, err := as.authenticator.RoleAdd(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
|
||||
resp, err := as.authenticator.RoleDelete(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
|
||||
resp, err := as.authenticator.RoleGet(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
|
||||
resp, err := as.authenticator.RoleList(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := as.authenticator.RoleRevokePermission(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
|
||||
resp, err := as.authenticator.RoleGrantPermission(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||
resp, err := as.authenticator.UserAdd(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
|
||||
resp, err := as.authenticator.UserDelete(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
|
||||
resp, err := as.authenticator.UserGet(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
|
||||
resp, err := as.authenticator.UserList(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
|
||||
resp, err := as.authenticator.UserGrantRole(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := as.authenticator.UserRevokeRole(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
||||
resp, err := as.authenticator.UserChangePassword(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
34 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go (generated, vendored)
@@ -1,34 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import "github.com/gogo/protobuf/proto"
|
||||
|
||||
type codec struct{}
|
||||
|
||||
func (c *codec) Marshal(v interface{}) ([]byte, error) {
|
||||
b, err := proto.Marshal(v.(proto.Message))
|
||||
sentBytes.Add(float64(len(b)))
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (c *codec) Unmarshal(data []byte, v interface{}) error {
|
||||
receivedBytes.Add(float64(len(data)))
|
||||
return proto.Unmarshal(data, v.(proto.Message))
|
||||
}
|
||||
|
||||
func (c *codec) String() string {
|
||||
return "proto"
|
||||
}
53 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go (generated, vendored)
@@ -1,53 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"math"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const maxStreams = math.MaxUint32
|
||||
|
||||
func init() {
|
||||
grpclog.SetLogger(plog)
|
||||
}
|
||||
|
||||
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
|
||||
var opts []grpc.ServerOption
|
||||
opts = append(opts, grpc.CustomCodec(&codec{}))
|
||||
if tls != nil {
|
||||
opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
|
||||
}
|
||||
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
|
||||
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
|
||||
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
|
||||
grpcServer := grpc.NewServer(opts...)
|
||||
|
||||
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
|
||||
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
|
||||
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
|
||||
pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
|
||||
pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
|
||||
pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
|
||||
|
||||
return grpcServer
|
||||
}
46 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go (generated, vendored)
@@ -1,46 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
)
|
||||
|
||||
type header struct {
|
||||
clusterID int64
|
||||
memberID int64
|
||||
raftTimer etcdserver.RaftTimer
|
||||
rev func() int64
|
||||
}
|
||||
|
||||
func newHeader(s *etcdserver.EtcdServer) header {
|
||||
return header{
|
||||
clusterID: int64(s.Cluster().ID()),
|
||||
memberID: int64(s.ID()),
|
||||
raftTimer: s,
|
||||
rev: func() int64 { return s.KV().Rev() },
|
||||
}
|
||||
}
|
||||
|
||||
// fill populates pb.ResponseHeader using etcdserver information
|
||||
func (h *header) fill(rh *pb.ResponseHeader) {
|
||||
rh.ClusterId = uint64(h.clusterID)
|
||||
rh.MemberId = uint64(h.memberID)
|
||||
rh.RaftTerm = h.raftTimer.Term()
|
||||
if rh.Revision == 0 {
|
||||
rh.Revision = h.rev()
|
||||
}
|
||||
}
144 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go (generated, vendored)
@@ -1,144 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/etcd/raft"
|
||||
|
||||
prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
maxNoLeaderCnt = 3
|
||||
)
|
||||
|
||||
type streamsMap struct {
|
||||
mu sync.Mutex
|
||||
streams map[grpc.ServerStream]struct{}
|
||||
}
|
||||
|
||||
func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
|
||||
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
|
||||
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
|
||||
return nil, rpctypes.ErrGRPCNotCapable
|
||||
}
|
||||
|
||||
md, ok := metadata.FromContext(ctx)
|
||||
if ok {
|
||||
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
|
||||
if s.Leader() == types.ID(raft.None) {
|
||||
return nil, rpctypes.ErrGRPCNoLeader
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
|
||||
}
|
||||
}
|
||||
|
||||
func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
|
||||
smap := monitorLeader(s)
|
||||
|
||||
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
|
||||
if !api.IsCapabilityEnabled(api.V3rpcCapability) {
|
||||
return rpctypes.ErrGRPCNotCapable
|
||||
}
|
||||
|
||||
md, ok := metadata.FromContext(ss.Context())
|
||||
if ok {
|
||||
if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
|
||||
if s.Leader() == types.ID(raft.None) {
|
||||
return rpctypes.ErrGRPCNoLeader
|
||||
}
|
||||
|
||||
cctx, cancel := context.WithCancel(ss.Context())
|
||||
ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
|
||||
|
||||
smap.mu.Lock()
|
||||
smap.streams[ss] = struct{}{}
|
||||
smap.mu.Unlock()
|
||||
|
||||
defer func() {
|
||||
smap.mu.Lock()
|
||||
delete(smap.streams, ss)
|
||||
smap.mu.Unlock()
|
||||
cancel()
|
||||
}()
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return prometheus.StreamServerInterceptor(srv, ss, info, handler)
|
||||
}
|
||||
}
|
||||
|
||||
type serverStreamWithCtx struct {
|
||||
grpc.ServerStream
|
||||
ctx context.Context
|
||||
cancel *context.CancelFunc
|
||||
}
|
||||
|
||||
func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
|
||||
|
||||
func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
|
||||
smap := &streamsMap{
|
||||
streams: make(map[grpc.ServerStream]struct{}),
|
||||
}
|
||||
|
||||
go func() {
|
||||
election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
|
||||
noLeaderCnt := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.StopNotify():
|
||||
return
|
||||
case <-time.After(election):
|
||||
if s.Leader() == types.ID(raft.None) {
|
||||
noLeaderCnt++
|
||||
} else {
|
||||
noLeaderCnt = 0
|
||||
}
|
||||
|
||||
// We are more conservative on canceling existing streams. Reconnecting streams
|
||||
// cost much more than just rejecting new requests. So we wait until the member
|
||||
// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
|
||||
if noLeaderCnt >= maxNoLeaderCnt {
|
||||
smap.mu.Lock()
|
||||
for ss := range smap.streams {
|
||||
if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
|
||||
(*ssWithCtx.cancel)()
|
||||
<-ss.Context().Done()
|
||||
}
|
||||
}
|
||||
smap.streams = make(map[grpc.ServerStream]struct{})
|
||||
smap.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return smap
|
||||
}
259 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go (generated, vendored)
@@ -1,259 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package v3rpc implements etcd v3 RPC system based on gRPC.
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var (
|
||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
|
||||
|
||||
// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
|
||||
// and Txn.Failure can have at most 128 operations.
|
||||
MaxOpsPerTxn = 128
|
||||
)
|
||||
|
||||
type kvServer struct {
|
||||
hdr header
|
||||
kv etcdserver.RaftKV
|
||||
}
|
||||
|
||||
func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
|
||||
return &kvServer{hdr: newHeader(s), kv: s}
|
||||
}
|
||||
|
||||
func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
if err := checkRangeRequest(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.kv.Range(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
|
||||
if resp.Header == nil {
|
||||
plog.Panic("unexpected nil resp.Header")
|
||||
}
|
||||
s.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
if err := checkPutRequest(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.kv.Put(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
|
||||
if resp.Header == nil {
|
||||
plog.Panic("unexpected nil resp.Header")
|
||||
}
|
||||
s.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
if err := checkDeleteRequest(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.kv.DeleteRange(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
|
||||
if resp.Header == nil {
|
||||
plog.Panic("unexpected nil resp.Header")
|
||||
}
|
||||
s.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if err := checkTxnRequest(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := s.kv.Txn(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
|
||||
if resp.Header == nil {
|
||||
plog.Panic("unexpected nil resp.Header")
|
||||
}
|
||||
s.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
|
||||
resp, err := s.kv.Compact(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
|
||||
if resp.Header == nil {
|
||||
plog.Panic("unexpected nil resp.Header")
|
||||
}
|
||||
s.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func checkRangeRequest(r *pb.RangeRequest) error {
|
||||
if len(r.Key) == 0 {
|
||||
return rpctypes.ErrGRPCEmptyKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkPutRequest(r *pb.PutRequest) error {
|
||||
if len(r.Key) == 0 {
|
||||
return rpctypes.ErrGRPCEmptyKey
|
||||
}
|
||||
if r.IgnoreValue && len(r.Value) != 0 {
|
||||
return rpctypes.ErrGRPCValueProvided
|
||||
}
|
||||
if r.IgnoreLease && r.Lease != 0 {
|
||||
return rpctypes.ErrGRPCLeaseProvided
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
|
||||
if len(r.Key) == 0 {
|
||||
return rpctypes.ErrGRPCEmptyKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxnRequest(r *pb.TxnRequest) error {
|
||||
if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn {
|
||||
return rpctypes.ErrGRPCTooManyOps
|
||||
}
|
||||
|
||||
for _, c := range r.Compare {
|
||||
if len(c.Key) == 0 {
|
||||
return rpctypes.ErrGRPCEmptyKey
|
||||
}
|
||||
}
|
||||
|
||||
for _, u := range r.Success {
|
||||
if err := checkRequestOp(u); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkRequestDupKeys(r.Success); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, u := range r.Failure {
|
||||
if err := checkRequestOp(u); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return checkRequestDupKeys(r.Failure)
|
||||
}
|
||||
|
||||
// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice
|
||||
func checkRequestDupKeys(reqs []*pb.RequestOp) error {
|
||||
// check put overlap
|
||||
keys := make(map[string]struct{})
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
preq := tv.RequestPut
|
||||
if preq == nil {
|
||||
continue
|
||||
}
|
||||
if _, ok := keys[string(preq.Key)]; ok {
|
||||
return rpctypes.ErrGRPCDuplicateKey
|
||||
}
|
||||
keys[string(preq.Key)] = struct{}{}
|
||||
}
|
||||
|
||||
// no need to check deletes if no puts; delete overlaps are permitted
|
||||
if len(keys) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// sort keys for range checking
|
||||
sortedKeys := []string{}
|
||||
for k := range keys {
|
||||
sortedKeys = append(sortedKeys, k)
|
||||
}
|
||||
sort.Strings(sortedKeys)
|
||||
|
||||
// check put overlap with deletes
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
dreq := tv.RequestDeleteRange
|
||||
if dreq == nil {
|
||||
continue
|
||||
}
|
||||
if dreq.RangeEnd == nil {
|
||||
if _, found := keys[string(dreq.Key)]; found {
|
||||
return rpctypes.ErrGRPCDuplicateKey
|
||||
}
|
||||
} else {
|
||||
lo := sort.SearchStrings(sortedKeys, string(dreq.Key))
|
||||
hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
|
||||
if lo != hi {
|
||||
// element between lo and hi => overlap
|
||||
return rpctypes.ErrGRPCDuplicateKey
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkRequestOp(u *pb.RequestOp) error {
|
||||
// TODO: ensure only one of the field is set.
|
||||
switch uv := u.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if uv.RequestRange != nil {
|
||||
return checkRangeRequest(uv.RequestRange)
|
||||
}
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if uv.RequestPut != nil {
|
||||
return checkPutRequest(uv.RequestPut)
|
||||
}
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if uv.RequestDeleteRange != nil {
|
||||
return checkDeleteRequest(uv.RequestDeleteRange)
|
||||
}
|
||||
default:
|
||||
// empty op / nil entry
|
||||
return rpctypes.ErrGRPCKeyNotFound
|
||||
}
|
||||
return nil
|
||||
}
123 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go generated vendored
@@ -1,123 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/lease"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type LeaseServer struct {
|
||||
hdr header
|
||||
le etcdserver.Lessor
|
||||
}
|
||||
|
||||
func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
|
||||
return &LeaseServer{le: s, hdr: newHeader(s)}
|
||||
}
|
||||
|
||||
func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
resp, err := ls.le.LeaseGrant(ctx, cr)
|
||||
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
ls.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
|
||||
resp, err := ls.le.LeaseRevoke(ctx, rr)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
ls.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
|
||||
resp, err := ls.le.LeaseTimeToLive(ctx, rr)
|
||||
if err != nil && err != lease.ErrLeaseNotFound {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
if err == lease.ErrLeaseNotFound {
|
||||
resp = &pb.LeaseTimeToLiveResponse{
|
||||
Header: &pb.ResponseHeader{},
|
||||
ID: rr.ID,
|
||||
TTL: -1,
|
||||
}
|
||||
}
|
||||
ls.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
errc <- ls.leaseKeepAlive(stream)
|
||||
}()
|
||||
select {
|
||||
case err = <-errc:
|
||||
case <-stream.Context().Done():
|
||||
// the only server-side cancellation is noleader for now.
|
||||
err = stream.Context().Err()
|
||||
if err == context.Canceled {
|
||||
err = rpctypes.ErrGRPCNoLeader
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
|
||||
for {
|
||||
req, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create header before we sent out the renew request.
|
||||
// This can make sure that the revision is strictly smaller or equal to
|
||||
// when the keepalive happened at the local server (when the local server is the leader)
|
||||
// or remote leader.
|
||||
// Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded
|
||||
// at rev 4.
|
||||
resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
|
||||
ls.hdr.fill(resp.Header)
|
||||
|
||||
ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
|
||||
if err == lease.ErrLeaseNotFound {
|
||||
err = nil
|
||||
ttl = 0
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return togRPCError(err)
|
||||
}
|
||||
|
||||
resp.TTL = ttl
|
||||
err = stream.Send(resp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
190 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go generated vendored
@@ -1,190 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
|
||||
"github.com/coreos/etcd/auth"
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/etcd/version"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type KVGetter interface {
|
||||
KV() mvcc.ConsistentWatchableKV
|
||||
}
|
||||
|
||||
type BackendGetter interface {
|
||||
Backend() backend.Backend
|
||||
}
|
||||
|
||||
type Alarmer interface {
|
||||
Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
|
||||
}
|
||||
|
||||
type RaftStatusGetter interface {
|
||||
Index() uint64
|
||||
Term() uint64
|
||||
Leader() types.ID
|
||||
}
|
||||
|
||||
type AuthGetter interface {
|
||||
AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
|
||||
AuthStore() auth.AuthStore
|
||||
}
|
||||
|
||||
type maintenanceServer struct {
|
||||
rg RaftStatusGetter
|
||||
kg KVGetter
|
||||
bg BackendGetter
|
||||
a Alarmer
|
||||
hdr header
|
||||
}
|
||||
|
||||
func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
|
||||
srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
|
||||
return &authMaintenanceServer{srv, s}
|
||||
}
|
||||
|
||||
func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
|
||||
plog.Noticef("starting to defragment the storage backend...")
|
||||
err := ms.bg.Backend().Defrag()
|
||||
if err != nil {
|
||||
plog.Errorf("failed to defragment the storage backend (%v)", err)
|
||||
return nil, err
|
||||
}
|
||||
plog.Noticef("finished defragmenting the storage backend")
|
||||
return &pb.DefragmentResponse{}, nil
|
||||
}
|
||||
|
||||
func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
|
||||
snap := ms.bg.Backend().Snapshot()
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
defer pr.Close()
|
||||
|
||||
go func() {
|
||||
snap.WriteTo(pw)
|
||||
if err := snap.Close(); err != nil {
|
||||
plog.Errorf("error closing snapshot (%v)", err)
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
|
||||
// send file data
|
||||
h := sha256.New()
|
||||
br := int64(0)
|
||||
buf := make([]byte, 32*1024)
|
||||
sz := snap.Size()
|
||||
for br < sz {
|
||||
n, err := io.ReadFull(pr, buf)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
return togRPCError(err)
|
||||
}
|
||||
br += int64(n)
|
||||
resp := &pb.SnapshotResponse{
|
||||
RemainingBytes: uint64(sz - br),
|
||||
Blob: buf[:n],
|
||||
}
|
||||
if err = srv.Send(resp); err != nil {
|
||||
return togRPCError(err)
|
||||
}
|
||||
h.Write(buf[:n])
|
||||
}
|
||||
|
||||
// send sha
|
||||
sha := h.Sum(nil)
|
||||
hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
|
||||
if err := srv.Send(hresp); err != nil {
|
||||
return togRPCError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
|
||||
h, rev, err := ms.kg.KV().Hash()
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
|
||||
ms.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
|
||||
return ms.a.Alarm(ctx, ar)
|
||||
}
|
||||
|
||||
func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
|
||||
resp := &pb.StatusResponse{
|
||||
Header: &pb.ResponseHeader{Revision: ms.hdr.rev()},
|
||||
Version: version.Version,
|
||||
DbSize: ms.bg.Backend().Size(),
|
||||
Leader: uint64(ms.rg.Leader()),
|
||||
RaftIndex: ms.rg.Index(),
|
||||
RaftTerm: ms.rg.Term(),
|
||||
}
|
||||
ms.hdr.fill(resp.Header)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type authMaintenanceServer struct {
|
||||
*maintenanceServer
|
||||
ag AuthGetter
|
||||
}
|
||||
|
||||
func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
|
||||
authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ams.ag.AuthStore().IsAdminPermitted(authInfo)
|
||||
}
|
||||
|
||||
func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
|
||||
if err := ams.isAuthenticated(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ams.maintenanceServer.Defragment(ctx, sr)
|
||||
}
|
||||
|
||||
func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
|
||||
if err := ams.isAuthenticated(srv.Context()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ams.maintenanceServer.Snapshot(sr, srv)
|
||||
}
|
||||
|
||||
func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
|
||||
if err := ams.isAuthenticated(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ams.maintenanceServer.Hash(ctx, r)
|
||||
}
|
||||
|
||||
func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
|
||||
return ams.maintenanceServer.Status(ctx, ar)
|
||||
}
103 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go generated vendored
@@ -1,103 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type ClusterServer struct {
|
||||
cluster api.Cluster
|
||||
server etcdserver.Server
|
||||
raftTimer etcdserver.RaftTimer
|
||||
}
|
||||
|
||||
func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
|
||||
return &ClusterServer{
|
||||
cluster: s.Cluster(),
|
||||
server: s,
|
||||
raftTimer: s,
|
||||
}
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
|
||||
urls, err := types.NewURLs(r.PeerURLs)
|
||||
if err != nil {
|
||||
return nil, rpctypes.ErrGRPCMemberBadURLs
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
m := membership.NewMember("", urls, "", &now)
|
||||
membs, merr := cs.server.AddMember(ctx, *m)
|
||||
if merr != nil {
|
||||
return nil, togRPCError(merr)
|
||||
}
|
||||
|
||||
return &pb.MemberAddResponse{
|
||||
Header: cs.header(),
|
||||
Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
|
||||
Members: membersToProtoMembers(membs),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
|
||||
membs, err := cs.server.RemoveMember(ctx, r.ID)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
|
||||
m := membership.Member{
|
||||
ID: types.ID(r.ID),
|
||||
RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
|
||||
}
|
||||
membs, err := cs.server.UpdateMember(ctx, m)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
|
||||
membs := membersToProtoMembers(cs.cluster.Members())
|
||||
return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
|
||||
}
|
||||
|
||||
func (cs *ClusterServer) header() *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
|
||||
}
|
||||
|
||||
func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
|
||||
protoMembs := make([]*pb.Member, len(membs))
|
||||
for i := range membs {
|
||||
protoMembs[i] = &pb.Member{
|
||||
Name: membs[i].Name,
|
||||
ID: uint64(membs[i].ID),
|
||||
PeerURLs: membs[i].PeerURLs,
|
||||
ClientURLs: membs[i].ClientURLs,
|
||||
}
|
||||
}
|
||||
return protoMembs
|
||||
}
38 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go generated vendored
@@ -1,38 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
var (
|
||||
sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "etcd",
|
||||
Subsystem: "network",
|
||||
Name: "client_grpc_sent_bytes_total",
|
||||
Help: "The total number of bytes sent to grpc clients.",
|
||||
})
|
||||
|
||||
receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: "etcd",
|
||||
Subsystem: "network",
|
||||
Name: "client_grpc_received_bytes_total",
|
||||
Help: "The total number of bytes received from grpc clients.",
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(sentBytes)
|
||||
prometheus.MustRegister(receivedBytes)
|
||||
}
89 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go generated vendored
@@ -1,89 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type quotaKVServer struct {
|
||||
pb.KVServer
|
||||
qa quotaAlarmer
|
||||
}
|
||||
|
||||
type quotaAlarmer struct {
|
||||
q etcdserver.Quota
|
||||
a Alarmer
|
||||
id types.ID
|
||||
}
|
||||
|
||||
// check whether request satisfies the quota. If there is not enough space,
|
||||
// ignore request and raise the free space alarm.
|
||||
func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
|
||||
if qa.q.Available(r) {
|
||||
return nil
|
||||
}
|
||||
req := &pb.AlarmRequest{
|
||||
MemberID: uint64(qa.id),
|
||||
Action: pb.AlarmRequest_ACTIVATE,
|
||||
Alarm: pb.AlarmType_NOSPACE,
|
||||
}
|
||||
qa.a.Alarm(ctx, req)
|
||||
return rpctypes.ErrGRPCNoSpace
|
||||
}
|
||||
|
||||
func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
|
||||
return &quotaKVServer{
|
||||
NewKVServer(s),
|
||||
quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
if err := s.qa.check(ctx, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.KVServer.Put(ctx, r)
|
||||
}
|
||||
|
||||
func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if err := s.qa.check(ctx, r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.KVServer.Txn(ctx, r)
|
||||
}
|
||||
|
||||
type quotaLeaseServer struct {
|
||||
pb.LeaseServer
|
||||
qa quotaAlarmer
|
||||
}
|
||||
|
||||
func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
if err := s.qa.check(ctx, cr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.LeaseServer.LeaseGrant(ctx, cr)
|
||||
}
|
||||
|
||||
func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
|
||||
return &quotaLeaseServer{
|
||||
NewLeaseServer(s),
|
||||
quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
|
||||
}
|
||||
}
103 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go generated vendored
@@ -1,103 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/auth"
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
func togRPCError(err error) error {
|
||||
switch err {
|
||||
case membership.ErrIDRemoved:
|
||||
return rpctypes.ErrGRPCMemberNotFound
|
||||
case membership.ErrIDNotFound:
|
||||
return rpctypes.ErrGRPCMemberNotFound
|
||||
case membership.ErrIDExists:
|
||||
return rpctypes.ErrGRPCMemberExist
|
||||
case membership.ErrPeerURLexists:
|
||||
return rpctypes.ErrGRPCPeerURLExist
|
||||
case etcdserver.ErrNotEnoughStartedMembers:
|
||||
return rpctypes.ErrMemberNotEnoughStarted
|
||||
|
||||
case mvcc.ErrCompacted:
|
||||
return rpctypes.ErrGRPCCompacted
|
||||
case mvcc.ErrFutureRev:
|
||||
return rpctypes.ErrGRPCFutureRev
|
||||
case etcdserver.ErrRequestTooLarge:
|
||||
return rpctypes.ErrGRPCRequestTooLarge
|
||||
case etcdserver.ErrNoSpace:
|
||||
return rpctypes.ErrGRPCNoSpace
|
||||
case etcdserver.ErrTooManyRequests:
|
||||
return rpctypes.ErrTooManyRequests
|
||||
|
||||
case etcdserver.ErrNoLeader:
|
||||
return rpctypes.ErrGRPCNoLeader
|
||||
case etcdserver.ErrStopped:
|
||||
return rpctypes.ErrGRPCStopped
|
||||
case etcdserver.ErrTimeout:
|
||||
return rpctypes.ErrGRPCTimeout
|
||||
case etcdserver.ErrTimeoutDueToLeaderFail:
|
||||
return rpctypes.ErrGRPCTimeoutDueToLeaderFail
|
||||
case etcdserver.ErrTimeoutDueToConnectionLost:
|
||||
return rpctypes.ErrGRPCTimeoutDueToConnectionLost
|
||||
case etcdserver.ErrUnhealthy:
|
||||
return rpctypes.ErrGRPCUnhealthy
|
||||
case etcdserver.ErrKeyNotFound:
|
||||
return rpctypes.ErrGRPCKeyNotFound
|
||||
|
||||
case lease.ErrLeaseNotFound:
|
||||
return rpctypes.ErrGRPCLeaseNotFound
|
||||
case lease.ErrLeaseExists:
|
||||
return rpctypes.ErrGRPCLeaseExist
|
||||
|
||||
case auth.ErrRootUserNotExist:
|
||||
return rpctypes.ErrGRPCRootUserNotExist
|
||||
case auth.ErrRootRoleNotExist:
|
||||
return rpctypes.ErrGRPCRootRoleNotExist
|
||||
case auth.ErrUserAlreadyExist:
|
||||
return rpctypes.ErrGRPCUserAlreadyExist
|
||||
case auth.ErrUserEmpty:
|
||||
return rpctypes.ErrGRPCUserEmpty
|
||||
case auth.ErrUserNotFound:
|
||||
return rpctypes.ErrGRPCUserNotFound
|
||||
case auth.ErrRoleAlreadyExist:
|
||||
return rpctypes.ErrGRPCRoleAlreadyExist
|
||||
case auth.ErrRoleNotFound:
|
||||
return rpctypes.ErrGRPCRoleNotFound
|
||||
case auth.ErrAuthFailed:
|
||||
return rpctypes.ErrGRPCAuthFailed
|
||||
case auth.ErrPermissionDenied:
|
||||
return rpctypes.ErrGRPCPermissionDenied
|
||||
case auth.ErrRoleNotGranted:
|
||||
return rpctypes.ErrGRPCRoleNotGranted
|
||||
case auth.ErrPermissionNotGranted:
|
||||
return rpctypes.ErrGRPCPermissionNotGranted
|
||||
case auth.ErrAuthNotEnabled:
|
||||
return rpctypes.ErrGRPCAuthNotEnabled
|
||||
case auth.ErrInvalidAuthToken:
|
||||
return rpctypes.ErrGRPCInvalidAuthToken
|
||||
case auth.ErrInvalidAuthMgmt:
|
||||
return rpctypes.ErrGRPCInvalidAuthMgmt
|
||||
default:
|
||||
return grpc.Errorf(codes.Unknown, err.Error())
|
||||
}
|
||||
}
426 vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go generated vendored
@@ -1,426 +0,0 @@
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/etcd/auth"
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
)
|
||||
|
||||
type watchServer struct {
|
||||
clusterID int64
|
||||
memberID int64
|
||||
raftTimer etcdserver.RaftTimer
|
||||
watchable mvcc.WatchableKV
|
||||
|
||||
ag AuthGetter
|
||||
}
|
||||
|
||||
func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
|
||||
return &watchServer{
|
||||
clusterID: int64(s.Cluster().ID()),
|
||||
memberID: int64(s.ID()),
|
||||
raftTimer: s,
|
||||
watchable: s.Watchable(),
|
||||
ag: s,
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// External test can read this with GetProgressReportInterval()
|
||||
// and change this to a small value to finish fast with
|
||||
// SetProgressReportInterval().
|
||||
progressReportInterval = 10 * time.Minute
|
||||
progressReportIntervalMu sync.RWMutex
|
||||
)
|
||||
|
||||
func GetProgressReportInterval() time.Duration {
|
||||
progressReportIntervalMu.RLock()
|
||||
defer progressReportIntervalMu.RUnlock()
|
||||
return progressReportInterval
|
||||
}
|
||||
|
||||
func SetProgressReportInterval(newTimeout time.Duration) {
|
||||
progressReportIntervalMu.Lock()
|
||||
defer progressReportIntervalMu.Unlock()
|
||||
progressReportInterval = newTimeout
|
||||
}
|
||||
|
||||
const (
|
||||
// We send ctrl response inside the read loop. We do not want
|
||||
// send to block read, but we still want ctrl response we sent to
|
||||
// be serialized. Thus we use a buffered chan to solve the problem.
|
||||
// A small buffer should be OK for most cases, since we expect the
|
||||
// ctrl requests are infrequent.
|
||||
ctrlStreamBufLen = 16
|
||||
)
|
||||
|
||||
// serverWatchStream is an etcd server side stream. It receives requests
|
||||
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
|
||||
// and creates responses that forwarded to gRPC stream.
|
||||
// It also forwards control message like watch created and canceled.
|
||||
type serverWatchStream struct {
|
||||
clusterID int64
|
||||
memberID int64
|
||||
raftTimer etcdserver.RaftTimer
|
||||
|
||||
watchable mvcc.WatchableKV
|
||||
|
||||
gRPCStream pb.Watch_WatchServer
|
||||
watchStream mvcc.WatchStream
|
||||
ctrlStream chan *pb.WatchResponse
|
||||
|
||||
// mu protects progress, prevKV
|
||||
mu sync.Mutex
|
||||
// progress tracks the watchID that stream might need to send
|
||||
// progress to.
|
||||
// TODO: combine progress and prevKV into a single struct?
|
||||
progress map[mvcc.WatchID]bool
|
||||
prevKV map[mvcc.WatchID]bool
|
||||
|
||||
// closec indicates the stream is closed.
|
||||
closec chan struct{}
|
||||
|
||||
// wg waits for the send loop to complete
|
||||
wg sync.WaitGroup
|
||||
|
||||
ag AuthGetter
|
||||
}
|
||||
|
||||
func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
|
||||
sws := serverWatchStream{
|
||||
clusterID: ws.clusterID,
|
||||
memberID: ws.memberID,
|
||||
raftTimer: ws.raftTimer,
|
||||
|
||||
watchable: ws.watchable,
|
||||
|
||||
gRPCStream: stream,
|
||||
watchStream: ws.watchable.NewWatchStream(),
|
||||
// chan for sending control response like watcher created and canceled.
|
||||
ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
|
||||
progress: make(map[mvcc.WatchID]bool),
|
||||
prevKV: make(map[mvcc.WatchID]bool),
|
||||
closec: make(chan struct{}),
|
||||
|
||||
ag: ws.ag,
|
||||
}
|
||||
|
||||
sws.wg.Add(1)
|
||||
go func() {
|
||||
sws.sendLoop()
|
||||
sws.wg.Done()
|
||||
}()
|
||||
|
||||
errc := make(chan error, 1)
|
||||
// Ideally recvLoop would also use sws.wg to signal its completion
|
||||
// but when stream.Context().Done() is closed, the stream's recv
|
||||
// may continue to block since it uses a different context, leading to
|
||||
// deadlock when calling sws.close().
|
||||
go func() {
|
||||
if rerr := sws.recvLoop(); rerr != nil {
|
||||
errc <- rerr
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case err = <-errc:
|
||||
close(sws.ctrlStream)
|
||||
case <-stream.Context().Done():
|
||||
err = stream.Context().Err()
|
||||
// the only server-side cancellation is noleader for now.
|
||||
if err == context.Canceled {
|
||||
err = rpctypes.ErrGRPCNoLeader
|
||||
}
|
||||
}
|
||||
sws.close()
|
||||
return err
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
|
||||
authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if authInfo == nil {
|
||||
// if auth is enabled, IsRangePermitted() can cause an error
|
||||
authInfo = &auth.AuthInfo{}
|
||||
}
|
||||
|
||||
return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) recvLoop() error {
|
||||
for {
|
||||
req, err := sws.gRPCStream.Recv()
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch uv := req.RequestUnion.(type) {
|
||||
case *pb.WatchRequest_CreateRequest:
|
||||
if uv.CreateRequest == nil {
|
||||
break
|
||||
}
|
||||
|
||||
creq := uv.CreateRequest
|
||||
if len(creq.Key) == 0 {
|
||||
// \x00 is the smallest key
|
||||
creq.Key = []byte{0}
|
||||
}
|
||||
if len(creq.RangeEnd) == 0 {
|
||||
// force nil since watchstream.Watch distinguishes
|
||||
// between nil and []byte{} for single key / >=
|
||||
creq.RangeEnd = nil
|
||||
}
|
||||
if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
|
||||
// support >= key queries
|
||||
creq.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
if !sws.isWatchPermitted(creq) {
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(sws.watchStream.Rev()),
|
||||
WatchId: -1,
|
||||
Canceled: true,
|
||||
Created: true,
|
||||
CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
|
||||
}
|
||||
|
||||
select {
|
||||
case sws.ctrlStream <- wr:
|
||||
case <-sws.closec:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
filters := FiltersFromRequest(creq)
|
||||
|
||||
wsrev := sws.watchStream.Rev()
|
||||
rev := creq.StartRevision
|
||||
if rev == 0 {
|
||||
rev = wsrev + 1
|
||||
}
|
||||
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
|
||||
if id != -1 {
|
||||
sws.mu.Lock()
|
||||
if creq.ProgressNotify {
|
||||
sws.progress[id] = true
|
||||
}
|
||||
if creq.PrevKv {
|
||||
sws.prevKV[id] = true
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
}
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(wsrev),
|
||||
WatchId: int64(id),
|
||||
Created: true,
|
||||
Canceled: id == -1,
|
||||
}
|
||||
select {
|
||||
case sws.ctrlStream <- wr:
|
||||
case <-sws.closec:
|
||||
return nil
|
||||
}
|
||||
case *pb.WatchRequest_CancelRequest:
|
||||
if uv.CancelRequest != nil {
|
||||
id := uv.CancelRequest.WatchId
|
||||
err := sws.watchStream.Cancel(mvcc.WatchID(id))
|
||||
if err == nil {
|
||||
sws.ctrlStream <- &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(sws.watchStream.Rev()),
|
||||
WatchId: id,
|
||||
Canceled: true,
|
||||
}
|
||||
sws.mu.Lock()
|
||||
delete(sws.progress, mvcc.WatchID(id))
|
||||
delete(sws.prevKV, mvcc.WatchID(id))
|
||||
sws.mu.Unlock()
|
||||
}
|
||||
}
|
||||
default:
|
||||
// we probably should not shutdown the entire stream when
|
||||
// receive an invalid command.
|
||||
// so just do nothing instead.
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) sendLoop() {
|
||||
// watch ids that are currently active
|
||||
ids := make(map[mvcc.WatchID]struct{})
|
||||
// watch responses pending on a watch id creation message
|
||||
pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
|
||||
|
||||
interval := GetProgressReportInterval()
|
||||
progressTicker := time.NewTicker(interval)
|
||||
|
||||
defer func() {
|
||||
progressTicker.Stop()
|
||||
// drain the chan to clean up pending events
|
||||
for ws := range sws.watchStream.Chan() {
|
||||
mvcc.ReportEventReceived(len(ws.Events))
|
||||
}
|
||||
for _, wrs := range pending {
|
||||
for _, ws := range wrs {
|
||||
mvcc.ReportEventReceived(len(ws.Events))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case wresp, ok := <-sws.watchStream.Chan():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: evs is []mvccpb.Event type
|
||||
// either return []*mvccpb.Event from the mvcc package
|
||||
// or define protocol buffer with []mvccpb.Event.
|
||||
evs := wresp.Events
|
||||
events := make([]*mvccpb.Event, len(evs))
|
||||
sws.mu.Lock()
|
||||
needPrevKV := sws.prevKV[wresp.WatchID]
|
||||
sws.mu.Unlock()
|
||||
for i := range evs {
|
||||
events[i] = &evs[i]
|
||||
|
||||
if needPrevKV {
|
||||
opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
|
||||
r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
|
||||
if err == nil && len(r.KVs) != 0 {
|
||||
events[i].PrevKv = &(r.KVs[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(wresp.Revision),
|
||||
WatchId: int64(wresp.WatchID),
|
||||
Events: events,
|
||||
CompactRevision: wresp.CompactRevision,
|
||||
}
|
||||
|
||||
if _, hasId := ids[wresp.WatchID]; !hasId {
|
||||
// buffer if id not yet announced
|
||||
wrs := append(pending[wresp.WatchID], wr)
|
||||
pending[wresp.WatchID] = wrs
|
||||
continue
|
||||
}
|
||||
|
||||
mvcc.ReportEventReceived(len(evs))
|
||||
if err := sws.gRPCStream.Send(wr); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sws.mu.Lock()
|
||||
if len(evs) > 0 && sws.progress[wresp.WatchID] {
|
||||
// elide next progress update if sent a key update
|
||||
sws.progress[wresp.WatchID] = false
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
|
||||
case c, ok := <-sws.ctrlStream:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if err := sws.gRPCStream.Send(c); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// track id creation
|
||||
wid := mvcc.WatchID(c.WatchId)
|
||||
if c.Canceled {
|
||||
delete(ids, wid)
|
||||
continue
|
||||
}
|
||||
if c.Created {
|
||||
// flush buffered events
|
||||
ids[wid] = struct{}{}
|
||||
for _, v := range pending[wid] {
|
||||
mvcc.ReportEventReceived(len(v.Events))
|
||||
if err := sws.gRPCStream.Send(v); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(pending, wid)
|
||||
}
|
||||
case <-progressTicker.C:
|
||||
sws.mu.Lock()
|
||||
for id, ok := range sws.progress {
|
||||
if ok {
|
||||
sws.watchStream.RequestProgress(id)
|
||||
}
|
||||
sws.progress[id] = true
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
case <-sws.closec:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) close() {
|
||||
sws.watchStream.Close()
|
||||
close(sws.closec)
|
||||
sws.wg.Wait()
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{
|
||||
ClusterId: uint64(sws.clusterID),
|
||||
MemberId: uint64(sws.memberID),
|
||||
Revision: rev,
|
||||
RaftTerm: sws.raftTimer.Term(),
|
||||
}
|
||||
}
|
||||
|
||||
func filterNoDelete(e mvccpb.Event) bool {
|
||||
return e.Type == mvccpb.DELETE
|
||||
}
|
||||
|
||||
func filterNoPut(e mvccpb.Event) bool {
|
||||
return e.Type == mvccpb.PUT
|
||||
}
|
||||
|
||||
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
|
||||
filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
|
||||
for _, ft := range creq.Filters {
|
||||
switch ft {
|
||||
case pb.WatchCreateRequest_NOPUT:
|
||||
filters = append(filters, filterNoPut)
|
||||
case pb.WatchCreateRequest_NODELETE:
|
||||
filters = append(filters, filterNoDelete)
|
||||
default:
|
||||
}
|
||||
}
|
||||
return filters
|
||||
}
878 vendor/github.com/coreos/etcd/etcdserver/apply.go generated vendored
@@ -1,878 +0,0 @@
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
warnApplyDuration = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
type applyResult struct {
|
||||
resp proto.Message
|
||||
err error
|
||||
// physc signals the physical effect of the request has completed in addition
|
||||
// to being logically reflected by the node. Currently only used for
|
||||
// Compaction requests.
|
||||
physc <-chan struct{}
|
||||
}
|
||||
|
||||
// applierV3 is the interface for processing V3 raft messages
|
||||
type applierV3 interface {
|
||||
Apply(r *pb.InternalRaftRequest) *applyResult
|
||||
|
||||
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
|
||||
Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
|
||||
DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
|
||||
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
|
||||
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
|
||||
|
||||
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
|
||||
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
|
||||
|
||||
Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
|
||||
|
||||
Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
|
||||
|
||||
AuthEnable() (*pb.AuthEnableResponse, error)
|
||||
AuthDisable() (*pb.AuthDisableResponse, error)
|
||||
|
||||
UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
|
||||
UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
|
||||
UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
|
||||
UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
|
||||
UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
|
||||
UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
|
||||
RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
|
||||
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
|
||||
RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
|
||||
RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
|
||||
RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
|
||||
UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
|
||||
RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
||||
}
|
||||
|
||||
type applierV3backend struct {
|
||||
s *EtcdServer
|
||||
}
|
||||
|
||||
func (s *EtcdServer) newApplierV3() applierV3 {
|
||||
return newAuthApplierV3(
|
||||
s.AuthStore(),
|
||||
newQuotaApplierV3(s, &applierV3backend{s}),
|
||||
)
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
ar := &applyResult{}
|
||||
|
||||
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
|
||||
switch {
|
||||
case r.Range != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
|
||||
case r.Put != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
|
||||
case r.DeleteRange != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
|
||||
case r.Txn != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
|
||||
case r.Compaction != nil:
|
||||
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
|
||||
case r.LeaseGrant != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
|
||||
case r.LeaseRevoke != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
|
||||
case r.Alarm != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
|
||||
case r.Authenticate != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
|
||||
case r.AuthEnable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthEnable()
|
||||
case r.AuthDisable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthDisable()
|
||||
case r.AuthUserAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
|
||||
case r.AuthUserDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
|
||||
case r.AuthUserChangePassword != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
|
||||
case r.AuthUserGrantRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
|
||||
case r.AuthUserGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
|
||||
case r.AuthRoleAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
|
||||
case r.AuthRoleGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
|
||||
case r.AuthRoleDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
|
||||
case r.AuthUserList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
|
||||
case r.AuthRoleList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
return ar
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
|
||||
resp = &pb.PutResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
val, leaseID := p.Value, lease.LeaseID(p.Lease)
|
||||
if txn == nil {
|
||||
if leaseID != lease.NoLease {
|
||||
if l := a.s.lessor.Lookup(leaseID); l == nil {
|
||||
return nil, lease.ErrLeaseNotFound
|
||||
}
|
||||
}
|
||||
txn = a.s.KV().Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
var rr *mvcc.RangeResult
|
||||
if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
|
||||
rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue || p.IgnoreLease {
|
||||
if rr == nil || len(rr.KVs) == 0 {
|
||||
// ignore_{lease,value} flag expects previous key-value pair
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue {
|
||||
val = rr.KVs[0].Value
|
||||
}
|
||||
if p.IgnoreLease {
|
||||
leaseID = lease.LeaseID(rr.KVs[0].Lease)
|
||||
}
|
||||
if p.PrevKv {
|
||||
if rr != nil && len(rr.KVs) != 0 {
|
||||
resp.PrevKv = &rr.KVs[0]
|
||||
}
|
||||
}
|
||||
|
||||
resp.Header.Revision = txn.Put(p.Key, val, leaseID)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
resp := &pb.DeleteRangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(dr.RangeEnd) {
|
||||
dr.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
if dr.PrevKv {
|
||||
rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rr != nil {
|
||||
for i := range rr.KVs {
|
||||
resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
resp := &pb.RangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Read()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(r.RangeEnd) {
|
||||
r.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
limit := r.Limit
|
||||
if r.SortOrder != pb.RangeRequest_NONE ||
|
||||
r.MinModRevision != 0 || r.MaxModRevision != 0 ||
|
||||
r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
|
||||
// fetch everything; sort and truncate afterwards
|
||||
limit = 0
|
||||
}
|
||||
if limit > 0 {
|
||||
// fetch one extra for 'more' flag
|
||||
limit = limit + 1
|
||||
}
|
||||
|
||||
ro := mvcc.RangeOptions{
|
||||
Limit: limit,
|
||||
Rev: r.Revision,
|
||||
Count: r.CountOnly,
|
||||
}
|
||||
|
||||
rr, err := txn.Range(r.Key, r.RangeEnd, ro)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.MaxModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MaxCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
|
||||
sortOrder := r.SortOrder
|
||||
if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
|
||||
// Since current mvcc.Range implementation returns results
|
||||
// sorted by keys in lexicographically ascending order,
|
||||
// sort ASCEND by default only when target is not 'KEY'
|
||||
sortOrder = pb.RangeRequest_ASCEND
|
||||
}
|
||||
if sortOrder != pb.RangeRequest_NONE {
|
||||
var sorter sort.Interface
|
||||
switch {
|
||||
case r.SortTarget == pb.RangeRequest_KEY:
|
||||
sorter = &kvSortByKey{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VERSION:
|
||||
sorter = &kvSortByVersion{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_CREATE:
|
||||
sorter = &kvSortByCreate{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_MOD:
|
||||
sorter = &kvSortByMod{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VALUE:
|
||||
sorter = &kvSortByValue{&kvSort{rr.KVs}}
|
||||
}
|
||||
switch {
|
||||
case sortOrder == pb.RangeRequest_ASCEND:
|
||||
sort.Sort(sorter)
|
||||
case sortOrder == pb.RangeRequest_DESCEND:
|
||||
sort.Sort(sort.Reverse(sorter))
|
||||
}
|
||||
}
|
||||
|
||||
if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
|
||||
rr.KVs = rr.KVs[:r.Limit]
|
||||
resp.More = true
|
||||
}
|
||||
|
||||
resp.Header.Revision = rr.Rev
|
||||
resp.Count = int64(rr.Count)
|
||||
for i := range rr.KVs {
|
||||
if r.KeysOnly {
|
||||
rr.KVs[i].Value = nil
|
||||
}
|
||||
resp.Kvs = append(resp.Kvs, &rr.KVs[i])
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
isWrite := !isTxnReadonly(rt)
|
||||
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
|
||||
|
||||
reqs, ok := a.compareToOps(txn, rt)
|
||||
if isWrite {
|
||||
if err := a.checkRequestPut(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := checkRequestRange(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resps := make([]*pb.ResponseOp, len(reqs))
|
||||
txnResp := &pb.TxnResponse{
|
||||
Responses: resps,
|
||||
Succeeded: ok,
|
||||
Header: &pb.ResponseHeader{},
|
||||
}
|
||||
|
||||
// When executing mutable txn ops, etcd must hold the txn lock so
|
||||
// readers do not see any intermediate results. Since writes are
|
||||
// serialized on the raft loop, the revision in the read view will
|
||||
// be the revision of the write txn.
|
||||
if isWrite {
|
||||
txn.End()
|
||||
txn = a.s.KV().Write()
|
||||
}
|
||||
for i := range reqs {
|
||||
resps[i] = a.applyUnion(txn, reqs[i])
|
||||
}
|
||||
rev := txn.Rev()
|
||||
if len(txn.Changes()) != 0 {
|
||||
rev++
|
||||
}
|
||||
txn.End()
|
||||
|
||||
txnResp.Header.Revision = rev
|
||||
return txnResp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
|
||||
for _, c := range rt.Compare {
|
||||
if !applyCompare(rv, c) {
|
||||
return rt.Failure, false
|
||||
}
|
||||
}
|
||||
return rt.Success, true
|
||||
}
|
||||
|
||||
// applyCompare applies the compare request.
|
||||
// If the comparison succeeds, it returns true. Otherwise, returns false.
|
||||
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
|
||||
rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var ckv mvccpb.KeyValue
|
||||
if len(rr.KVs) != 0 {
|
||||
ckv = rr.KVs[0]
|
||||
} else {
|
||||
// Use the zero value of ckv normally. However...
|
||||
if c.Target == pb.Compare_VALUE {
|
||||
// Always fail if we're comparing a value on a key that doesn't exist.
|
||||
// We can treat non-existence as the empty set explicitly, such that
|
||||
// even a key with a value of length 0 bytes is still a real key
|
||||
// that was written that way
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// -1 is less, 0 is equal, 1 is greater
|
||||
var result int
|
||||
switch c.Target {
|
||||
case pb.Compare_VALUE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Value)
|
||||
if tv != nil {
|
||||
result = bytes.Compare(ckv.Value, tv.Value)
|
||||
}
|
||||
case pb.Compare_CREATE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.CreateRevision, tv.CreateRevision)
|
||||
}
|
||||
|
||||
case pb.Compare_MOD:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_ModRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.ModRevision, tv.ModRevision)
|
||||
}
|
||||
case pb.Compare_VERSION:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Version)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.Version, tv.Version)
|
||||
}
|
||||
}
|
||||
|
||||
switch c.Result {
|
||||
case pb.Compare_EQUAL:
|
||||
return result == 0
|
||||
case pb.Compare_NOT_EQUAL:
|
||||
return result != 0
|
||||
case pb.Compare_GREATER:
|
||||
return result > 0
|
||||
case pb.Compare_LESS:
|
||||
return result < 0
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
	switch tv := union.Request.(type) {
	case *pb.RequestOp_RequestRange:
		if tv.RequestRange != nil {
			resp, err := a.Range(txn, tv.RequestRange)
			if err != nil {
				plog.Panicf("unexpected error during txn: %v", err)
			}
			return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
		}
	case *pb.RequestOp_RequestPut:
		if tv.RequestPut != nil {
			resp, err := a.Put(txn, tv.RequestPut)
			if err != nil {
				plog.Panicf("unexpected error during txn: %v", err)
			}
			return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
		}
	case *pb.RequestOp_RequestDeleteRange:
		if tv.RequestDeleteRange != nil {
			resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
			if err != nil {
				plog.Panicf("unexpected error during txn: %v", err)
			}
			return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
		}
	default:
		// empty union
		return nil
	}
	return nil

}

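// applyUnion runs only after the txn's requests have been validated (see
// checkRequestPut and checkRequestRange below), so an error from Range, Put
// or DeleteRange at this point is unexpected and is treated as fatal via
// plog.Panicf.
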
func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
	resp := &pb.CompactionResponse{}
	resp.Header = &pb.ResponseHeader{}
	ch, err := a.s.KV().Compact(compaction.Revision)
	if err != nil {
		return nil, ch, err
	}
	// get the current revision. which key to get is not important.
	rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
	resp.Header.Revision = rr.Rev
	return resp, ch, err
}

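// The channel returned by KV().Compact is handed back to the caller so it can
// wait for the backend compaction to finish, while the response itself is
// built and returned immediately.
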
func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
	resp := &pb.LeaseGrantResponse{}
	if err == nil {
		resp.ID = int64(l.ID)
		resp.TTL = l.TTL()
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
	return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
}

func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	resp := &pb.AlarmResponse{}
	oldCount := len(a.s.alarmStore.Get(ar.Alarm))

	switch ar.Action {
	case pb.AlarmRequest_GET:
		resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
	case pb.AlarmRequest_ACTIVATE:
		m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
		if m == nil {
			break
		}
		resp.Alarms = append(resp.Alarms, m)
		activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
		if !activated {
			break
		}

		switch m.Alarm {
		case pb.AlarmType_NOSPACE:
			plog.Warningf("alarm raised %+v", m)
			a.s.applyV3 = newApplierV3Capped(a)
		default:
			plog.Errorf("unimplemented alarm activation (%+v)", m)
		}
	case pb.AlarmRequest_DEACTIVATE:
		m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
		if m == nil {
			break
		}
		resp.Alarms = append(resp.Alarms, m)
		deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
		if !deactivated {
			break
		}

		switch m.Alarm {
		case pb.AlarmType_NOSPACE:
			plog.Infof("alarm disarmed %+v", ar)
			a.s.applyV3 = a.s.newApplierV3()
		default:
			plog.Errorf("unimplemented alarm deactivation (%+v)", m)
		}
	default:
		return nil, nil
	}
	return resp, nil
}

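// Activating a NOSPACE alarm swaps the server's applier for the capped
// variant below, which rejects further writes; deactivating the alarm
// restores the normal applier via s.newApplierV3().
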
type applierV3Capped struct {
	applierV3
	q backendQuota
}

// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
// with Puts so that the number of keys in the store is capped.
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }

func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
	return nil, ErrNoSpace
}

func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if a.q.Cost(r) > 0 {
		return nil, ErrNoSpace
	}
	return a.applierV3.Txn(r)
}

func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	return nil, ErrNoSpace
}

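// Only Put, Txn and LeaseGrant are overridden by the capped applier; reads
// such as Range, and transactions with zero cost, still pass through to the
// embedded applierV3 while the store is capped.
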
func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
	err := a.s.AuthStore().AuthEnable()
	if err != nil {
		return nil, err
	}
	return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
}

func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
	a.s.AuthStore().AuthDisable()
	return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
}

func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
	ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
	resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
	resp, err := a.s.AuthStore().UserAdd(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
	resp, err := a.s.AuthStore().UserDelete(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
	resp, err := a.s.AuthStore().UserChangePassword(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
	resp, err := a.s.AuthStore().UserGrantRole(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
	resp, err := a.s.AuthStore().UserGet(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
	resp, err := a.s.AuthStore().UserRevokeRole(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
	resp, err := a.s.AuthStore().RoleAdd(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
	resp, err := a.s.AuthStore().RoleGrantPermission(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
	resp, err := a.s.AuthStore().RoleGet(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
	resp, err := a.s.AuthStore().RoleRevokePermission(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
	resp, err := a.s.AuthStore().RoleDelete(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
	resp, err := a.s.AuthStore().UserList(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
	resp, err := a.s.AuthStore().RoleList(r)
	if resp != nil {
		resp.Header = newHeader(a.s)
	}
	return resp, err
}

type quotaApplierV3 struct {
	applierV3
	q Quota
}

func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
	return &quotaApplierV3{app, NewBackendQuota(s)}
}

func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
	ok := a.q.Available(p)
	resp, err := a.applierV3.Put(txn, p)
	if err == nil && !ok {
		err = ErrNoSpace
	}
	return resp, err
}

func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
	ok := a.q.Available(rt)
	resp, err := a.applierV3.Txn(rt)
	if err == nil && !ok {
		err = ErrNoSpace
	}
	return resp, err
}

func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	ok := a.q.Available(lc)
	resp, err := a.applierV3.LeaseGrant(lc)
	if err == nil && !ok {
		err = ErrNoSpace
	}
	return resp, err
}

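// The quota is checked before the apply, but the apply still runs; ErrNoSpace
// is only attached to the result afterwards. Every member therefore applies
// the same entry and stays consistent even when the backend quota has been
// exceeded, while the caller is told that no space is left.
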
type kvSort struct{ kvs []mvccpb.KeyValue }

func (s *kvSort) Swap(i, j int) {
	t := s.kvs[i]
	s.kvs[i] = s.kvs[j]
	s.kvs[j] = t
}
func (s *kvSort) Len() int { return len(s.kvs) }

type kvSortByKey struct{ *kvSort }

func (s *kvSortByKey) Less(i, j int) bool {
	return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
}

type kvSortByVersion struct{ *kvSort }

func (s *kvSortByVersion) Less(i, j int) bool {
	return (s.kvs[i].Version - s.kvs[j].Version) < 0
}

type kvSortByCreate struct{ *kvSort }

func (s *kvSortByCreate) Less(i, j int) bool {
	return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
}

type kvSortByMod struct{ *kvSort }

func (s *kvSortByMod) Less(i, j int) bool {
	return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
}

type kvSortByValue struct{ *kvSort }

func (s *kvSortByValue) Less(i, j int) bool {
	return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
}

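// Illustrative usage (assumes the standard library sort package, which is not
// shown in this excerpt): the wrappers above implement sort.Interface, so a
// Range result can be ordered with, for example,
//
//	sort.Sort(&kvSortByKey{&kvSort{kvs: rr.KVs}})
//	sort.Sort(sort.Reverse(&kvSortByMod{&kvSort{kvs: rr.KVs}}))
//
// the first ordering by key ascending, the second by mod revision descending.
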
func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
		if !ok {
			continue
		}
		preq := tv.RequestPut
		if preq == nil {
			continue
		}
		if preq.IgnoreValue || preq.IgnoreLease {
			// expects previous key-value, error if not exist
			rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
			if err != nil {
				return err
			}
			if rr == nil || len(rr.KVs) == 0 {
				return ErrKeyNotFound
			}
		}
		if lease.LeaseID(preq.Lease) == lease.NoLease {
			continue
		}
		if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
			return lease.ErrLeaseNotFound
		}
	}
	return nil
}

func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
		if !ok {
			continue
		}
		greq := tv.RequestRange
		if greq == nil || greq.Revision == 0 {
			continue
		}

		if greq.Revision > rv.Rev() {
			return mvcc.ErrFutureRev
		}
		if greq.Revision < rv.FirstRev() {
			return mvcc.ErrCompacted
		}
	}
	return nil
}

func compareInt64(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}

// isGteRange determines if the range end is a >= range. This works around grpc
// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
func isGteRange(rangeEnd []byte) bool {
	return len(rangeEnd) == 1 && rangeEnd[0] == 0
}

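// Example: a RangeRequest with Key = []byte("foo") and RangeEnd = []byte{0}
// is treated as "all keys >= foo", whereas an empty RangeEnd requests the
// single key "foo".
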
func noSideEffect(r *pb.InternalRaftRequest) bool {
	return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
}

func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
	f := func(ops []*pb.RequestOp) []*pb.RequestOp {
		j := 0
		for i := 0; i < len(ops); i++ {
			if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
				continue
			}
			ops[j] = ops[i]
			j++
		}

		return ops[:j]
	}

	txn.Success = f(txn.Success)
	txn.Failure = f(txn.Failure)
}

func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
	j := 0
	for i := range rr.KVs {
		rr.KVs[j] = rr.KVs[i]
		if !isPrunable(&rr.KVs[i]) {
			j++
		}
	}
	rr.KVs = rr.KVs[:j]
}

func newHeader(s *EtcdServer) *pb.ResponseHeader {
	return &pb.ResponseHeader{
		ClusterId: uint64(s.Cluster().ID()),
		MemberId:  uint64(s.ID()),
		Revision:  s.KV().Rev(),
		RaftTerm:  s.Term(),
	}
}

196 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go (generated, vendored)
@@ -1,196 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/auth"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
)
|
||||
|
||||
type authApplierV3 struct {
|
||||
applierV3
|
||||
as auth.AuthStore
|
||||
|
||||
// mu serializes Apply so that user isn't corrupted and so that
|
||||
// serialized requests don't leak data from TOCTOU errors
|
||||
mu sync.Mutex
|
||||
|
||||
authInfo auth.AuthInfo
|
||||
}
|
||||
|
||||
func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
|
||||
return &authApplierV3{applierV3: base, as: as}
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
aa.mu.Lock()
|
||||
defer aa.mu.Unlock()
|
||||
if r.Header != nil {
|
||||
// backward-compatible with pre-3.0 releases when internalRaftRequest
|
||||
// does not have header field
|
||||
aa.authInfo.Username = r.Header.Username
|
||||
aa.authInfo.Revision = r.Header.AuthRevision
|
||||
}
|
||||
if needAdminPermission(r) {
|
||||
if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return &applyResult{err: err}
|
||||
}
|
||||
}
|
||||
ret := aa.applierV3.Apply(r)
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return ret
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return aa.applierV3.Put(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Range(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return aa.applierV3.DeleteRange(txn, r)
|
||||
}
|
||||
|
||||
func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
switch tv := requ.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if tv.RequestRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if tv.RequestPut == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if tv.RequestDeleteRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if tv.RequestDeleteRange.PrevKv {
|
||||
err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
|
||||
for _, c := range rt.Compare {
|
||||
if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Txn(rt)
|
||||
}
|
||||
|
||||
func needAdminPermission(r *pb.InternalRaftRequest) bool {
|
||||
switch {
|
||||
case r.AuthEnable != nil:
|
||||
return true
|
||||
case r.AuthDisable != nil:
|
||||
return true
|
||||
case r.AuthUserAdd != nil:
|
||||
return true
|
||||
case r.AuthUserDelete != nil:
|
||||
return true
|
||||
case r.AuthUserChangePassword != nil:
|
||||
return true
|
||||
case r.AuthUserGrantRole != nil:
|
||||
return true
|
||||
case r.AuthUserGet != nil:
|
||||
return true
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
return true
|
||||
case r.AuthRoleAdd != nil:
|
||||
return true
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
return true
|
||||
case r.AuthRoleGet != nil:
|
||||
return true
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
return true
|
||||
case r.AuthRoleDelete != nil:
|
||||
return true
|
||||
case r.AuthUserList != nil:
|
||||
return true
|
||||
case r.AuthRoleList != nil:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
140 vendor/github.com/coreos/etcd/etcdserver/apply_v2.go (generated, vendored)
@@ -1,140 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/pbutil"
|
||||
"github.com/coreos/etcd/store"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// ApplierV2 is the interface for processing V2 raft messages
|
||||
type ApplierV2 interface {
|
||||
Delete(r *pb.Request) Response
|
||||
Post(r *pb.Request) Response
|
||||
Put(r *pb.Request) Response
|
||||
QGet(r *pb.Request) Response
|
||||
Sync(r *pb.Request) Response
|
||||
}
|
||||
|
||||
func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
|
||||
return &applierV2store{store: s, cluster: c}
|
||||
}
|
||||
|
||||
type applierV2store struct {
|
||||
store store.Store
|
||||
cluster *membership.RaftCluster
|
||||
}
|
||||
|
||||
func (a *applierV2store) Delete(r *pb.Request) Response {
|
||||
switch {
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
|
||||
default:
|
||||
return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) Post(r *pb.Request) Response {
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Put(r *pb.Request) Response {
|
||||
ttlOptions := toTTLOptions(r)
|
||||
exists, existsSet := pbutil.GetBool(r.PrevExist)
|
||||
switch {
|
||||
case existsSet:
|
||||
if exists {
|
||||
if r.PrevIndex == 0 && r.PrevValue == "" {
|
||||
return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
default:
|
||||
if storeMemberAttributeRegexp.MatchString(r.Path) {
|
||||
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
|
||||
var attr membership.Attributes
|
||||
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
|
||||
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
|
||||
}
|
||||
if a.cluster != nil {
|
||||
a.cluster.UpdateAttributes(id, attr)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
if r.Path == membership.StoreClusterVersionKey() {
|
||||
if a.cluster != nil {
|
||||
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) QGet(r *pb.Request) Response {
|
||||
return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Sync(r *pb.Request) Response {
|
||||
a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
|
||||
return Response{}
|
||||
}
|
||||
|
||||
// applyV2Request interprets r as a call to store.X and returns a Response interpreted
|
||||
// from store.Event
|
||||
func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
|
||||
toTTLOptions(r)
|
||||
switch r.Method {
|
||||
case "POST":
|
||||
return s.applyV2.Post(r)
|
||||
case "PUT":
|
||||
return s.applyV2.Put(r)
|
||||
case "DELETE":
|
||||
return s.applyV2.Delete(r)
|
||||
case "QGET":
|
||||
return s.applyV2.QGet(r)
|
||||
case "SYNC":
|
||||
return s.applyV2.Sync(r)
|
||||
default:
|
||||
// This should never be reached, but just in case:
|
||||
return Response{err: ErrUnknownMethod}
|
||||
}
|
||||
}
|
||||
|
||||
func toTTLOptions(r *pb.Request) store.TTLOptionSet {
|
||||
refresh, _ := pbutil.GetBool(r.Refresh)
|
||||
ttlOptions := store.TTLOptionSet{Refresh: refresh}
|
||||
if r.Expiration != 0 {
|
||||
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
|
||||
}
|
||||
return ttlOptions
|
||||
}
|
||||
|
||||
func toResponse(ev *store.Event, err error) Response {
|
||||
return Response{Event: ev, err: err}
|
||||
}
|
||||
81 vendor/github.com/coreos/etcd/etcdserver/backend.go (generated, vendored)
@@ -1,81 +0,0 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/raft/raftpb"
|
||||
"github.com/coreos/etcd/snap"
|
||||
)
|
||||
|
||||
func newBackend(cfg *ServerConfig) backend.Backend {
|
||||
bcfg := backend.DefaultBackendConfig()
|
||||
bcfg.Path = cfg.backendPath()
|
||||
if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
|
||||
// permit 10% excess over quota for disarm
|
||||
bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
|
||||
}
|
||||
return backend.New(bcfg)
|
||||
}
|
||||
|
||||
// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
|
||||
func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("database snapshot file path error: %v", err)
|
||||
}
|
||||
if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
|
||||
return nil, fmt.Errorf("rename snapshot file error: %v", err)
|
||||
}
|
||||
return openBackend(cfg), nil
|
||||
}
|
||||
|
||||
// openBackend returns a backend using the current etcd db.
|
||||
func openBackend(cfg *ServerConfig) backend.Backend {
|
||||
fn := cfg.backendPath()
|
||||
beOpened := make(chan backend.Backend)
|
||||
go func() {
|
||||
beOpened <- newBackend(cfg)
|
||||
}()
|
||||
select {
|
||||
case be := <-beOpened:
|
||||
return be
|
||||
case <-time.After(time.Second):
|
||||
plog.Warningf("another etcd process is using %q and holds the file lock.", fn)
|
||||
plog.Warningf("waiting for it to exit before starting...")
|
||||
}
|
||||
return <-beOpened
|
||||
}
|
||||
|
||||
// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes
|
||||
// before updating the backend db after persisting raft snapshot to disk,
|
||||
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
|
||||
// case, replace the db with the snapshot db sent by the leader.
|
||||
func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
var cIndex consistentIndex
|
||||
kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
|
||||
defer kv.Close()
|
||||
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
|
||||
return oldbe, nil
|
||||
}
|
||||
oldbe.Close()
|
||||
return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
|
||||
}
|
||||
258 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go (generated, vendored)
@@ -1,258 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/etcd/version"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// isMemberBootstrapped tries to check if the given member has been bootstrapped
|
||||
// in the given cluster.
|
||||
func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
|
||||
rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
id := cl.MemberByName(member).ID
|
||||
m := rcl.Member(id)
|
||||
if m == nil {
|
||||
return false
|
||||
}
|
||||
if len(m.ClientURLs) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
|
||||
// attempts to construct a Cluster by accessing the members endpoint on one of
|
||||
// these URLs. The first URL to provide a response is used. If no URLs provide
|
||||
// a response, or a Cluster cannot be successfully created from a received
|
||||
// response, an error is returned.
|
||||
// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
|
||||
// 10 second is enough for building connection and finishing request.
|
||||
func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
|
||||
return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
|
||||
}
|
||||
|
||||
// If logerr is true, it prints out more error messages.
|
||||
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
|
||||
cc := &http.Client{
|
||||
Transport: rt,
|
||||
Timeout: timeout,
|
||||
}
|
||||
for _, u := range urls {
|
||||
resp, err := cc.Get(u + "/members")
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not get cluster response from %s: %v", u, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not read the body of cluster response: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
var membs []*membership.Member
|
||||
if err = json.Unmarshal(b, &membs); err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not unmarshal cluster response: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// check the length of membership members
|
||||
// if the membership members are present then prepare and return raft cluster
|
||||
// if membership members are not present then the raft cluster formed will be
|
||||
// an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
|
||||
if len(membs) > 0 {
|
||||
return membership.NewClusterFromMembers("", id, membs), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
|
||||
}
|
||||
return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
|
||||
}
|
||||
|
||||
// getRemotePeerURLs returns peer urls of remote members in the cluster. The
|
||||
// returned list is sorted in ascending lexicographical order.
|
||||
func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
|
||||
us := make([]string, 0)
|
||||
for _, m := range cl.Members() {
|
||||
if m.Name == local {
|
||||
continue
|
||||
}
|
||||
us = append(us, m.PeerURLs...)
|
||||
}
|
||||
sort.Strings(us)
|
||||
return us
|
||||
}
|
||||
|
||||
// getVersions returns the versions of the members in the given cluster.
|
||||
// The key of the returned map is the member's ID. The value of the returned map
|
||||
// is the semver versions string, including server and cluster.
|
||||
// If it fails to get the version of a member, the key will be nil.
|
||||
func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
|
||||
members := cl.Members()
|
||||
vers := make(map[string]*version.Versions)
|
||||
for _, m := range members {
|
||||
if m.ID == local {
|
||||
cv := "not_decided"
|
||||
if cl.Version() != nil {
|
||||
cv = cl.Version().String()
|
||||
}
|
||||
vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
|
||||
continue
|
||||
}
|
||||
ver, err := getVersion(m, rt)
|
||||
if err != nil {
|
||||
plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
|
||||
vers[m.ID.String()] = nil
|
||||
} else {
|
||||
vers[m.ID.String()] = ver
|
||||
}
|
||||
}
|
||||
return vers
|
||||
}
|
||||
|
||||
// decideClusterVersion decides the cluster version based on the versions map.
|
||||
// The returned version is the min server version in the map, or nil if the min
|
||||
// version in unknown.
|
||||
func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
|
||||
var cv *semver.Version
|
||||
lv := semver.Must(semver.NewVersion(version.Version))
|
||||
|
||||
for mid, ver := range vers {
|
||||
if ver == nil {
|
||||
return nil
|
||||
}
|
||||
v, err := semver.NewVersion(ver.Server)
|
||||
if err != nil {
|
||||
plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
|
||||
return nil
|
||||
}
|
||||
if lv.LessThan(*v) {
|
||||
plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
|
||||
plog.Warningf("member %s has a higher version %s", mid, ver.Server)
|
||||
}
|
||||
if cv == nil {
|
||||
cv = v
|
||||
} else if v.LessThan(*cv) {
|
||||
cv = v
|
||||
}
|
||||
}
|
||||
return cv
|
||||
}
|
||||
|
||||
// isCompatibleWithCluster return true if the local member has a compatible version with
|
||||
// the current running cluster.
|
||||
// The version is considered as compatible when at least one of the other members in the cluster has a
|
||||
// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version
|
||||
// out of the range.
|
||||
// We set this rule since when the local member joins, another member might be offline.
|
||||
func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
|
||||
vers := getVersions(cl, local, rt)
|
||||
minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
|
||||
maxV := semver.Must(semver.NewVersion(version.Version))
|
||||
maxV = &semver.Version{
|
||||
Major: maxV.Major,
|
||||
Minor: maxV.Minor,
|
||||
}
|
||||
|
||||
return isCompatibleWithVers(vers, local, minV, maxV)
|
||||
}
|
||||
|
||||
func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
|
||||
var ok bool
|
||||
for id, v := range vers {
|
||||
// ignore comparison with local version
|
||||
if id == local.String() {
|
||||
continue
|
||||
}
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
clusterv, err := semver.NewVersion(v.Cluster)
|
||||
if err != nil {
|
||||
plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
|
||||
continue
|
||||
}
|
||||
if clusterv.LessThan(*minV) {
|
||||
plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
|
||||
return false
|
||||
}
|
||||
if maxV.LessThan(*clusterv) {
|
||||
plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
|
||||
return false
|
||||
}
|
||||
ok = true
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// getVersion returns the Versions of the given member via its
|
||||
// peerURLs. Returns the last error if it fails to get the version.
|
||||
func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
|
||||
cc := &http.Client{
|
||||
Transport: rt,
|
||||
}
|
||||
var (
|
||||
err error
|
||||
resp *http.Response
|
||||
)
|
||||
|
||||
for _, u := range m.PeerURLs {
|
||||
resp, err = cc.Get(u + "/version")
|
||||
if err != nil {
|
||||
plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
var b []byte
|
||||
b, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
var vers version.Versions
|
||||
if err = json.Unmarshal(b, &vers); err != nil {
|
||||
plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
return &vers, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
204 vendor/github.com/coreos/etcd/etcdserver/config.go (generated, vendored)
@@ -1,204 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/etcd/pkg/netutil"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
)
|
||||
|
||||
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
|
||||
type ServerConfig struct {
|
||||
Name string
|
||||
DiscoveryURL string
|
||||
DiscoveryProxy string
|
||||
ClientURLs types.URLs
|
||||
PeerURLs types.URLs
|
||||
DataDir string
|
||||
// DedicatedWALDir config will make the etcd to write the WAL to the WALDir
|
||||
// rather than the dataDir/member/wal.
|
||||
DedicatedWALDir string
|
||||
SnapCount uint64
|
||||
MaxSnapFiles uint
|
||||
MaxWALFiles uint
|
||||
InitialPeerURLsMap types.URLsMap
|
||||
InitialClusterToken string
|
||||
NewCluster bool
|
||||
ForceNewCluster bool
|
||||
PeerTLSInfo transport.TLSInfo
|
||||
|
||||
TickMs uint
|
||||
ElectionTicks int
|
||||
BootstrapTimeout time.Duration
|
||||
|
||||
AutoCompactionRetention int
|
||||
QuotaBackendBytes int64
|
||||
|
||||
StrictReconfigCheck bool
|
||||
|
||||
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
|
||||
ClientCertAuthEnabled bool
|
||||
|
||||
AuthToken string
|
||||
}
|
||||
|
||||
// VerifyBootstrap sanity-checks the initial config for bootstrap case
|
||||
// and returns an error for things that should never happen.
|
||||
func (c *ServerConfig) VerifyBootstrap() error {
|
||||
if err := c.hasLocalMember(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.advertiseMatchesCluster(); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkDuplicateURL(c.InitialPeerURLsMap) {
|
||||
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
|
||||
}
|
||||
if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
|
||||
return fmt.Errorf("initial cluster unset and no discovery URL found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
|
||||
// case and returns an error for things that should never happen.
|
||||
func (c *ServerConfig) VerifyJoinExisting() error {
|
||||
// The member has announced its peer urls to the cluster before starting; no need to
|
||||
// set the configuration again.
|
||||
if err := c.hasLocalMember(); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkDuplicateURL(c.InitialPeerURLsMap) {
|
||||
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
|
||||
}
|
||||
if c.DiscoveryURL != "" {
|
||||
return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasLocalMember checks that the cluster at least contains the local server.
|
||||
func (c *ServerConfig) hasLocalMember() error {
|
||||
if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
|
||||
return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
|
||||
func (c *ServerConfig) advertiseMatchesCluster() error {
|
||||
urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
|
||||
urls.Sort()
|
||||
sort.Strings(apurls)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
|
||||
defer cancel()
|
||||
if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
|
||||
umap := map[string]types.URLs{c.Name: c.PeerURLs}
|
||||
return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
|
||||
|
||||
func (c *ServerConfig) WALDir() string {
|
||||
if c.DedicatedWALDir != "" {
|
||||
return c.DedicatedWALDir
|
||||
}
|
||||
return filepath.Join(c.MemberDir(), "wal")
|
||||
}
|
||||
|
||||
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
|
||||
|
||||
func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
|
||||
|
||||
// ReqTimeout returns timeout for request to finish.
|
||||
func (c *ServerConfig) ReqTimeout() time.Duration {
|
||||
// 5s for queue waiting, computation and disk IO delay
|
||||
// + 2 * election timeout for possible leader election
|
||||
return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond
|
||||
}
|
||||
|
||||
func (c *ServerConfig) electionTimeout() time.Duration {
|
||||
return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *ServerConfig) peerDialTimeout() time.Duration {
|
||||
// 1s for queue wait and system delay
|
||||
// + one RTT, which is smaller than 1/5 election timeout
|
||||
return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5
|
||||
}
|
||||
|
||||
func (c *ServerConfig) PrintWithInitial() { c.print(true) }
|
||||
|
||||
func (c *ServerConfig) Print() { c.print(false) }
|
||||
|
||||
func (c *ServerConfig) print(initial bool) {
|
||||
plog.Infof("name = %s", c.Name)
|
||||
if c.ForceNewCluster {
|
||||
plog.Infof("force new cluster")
|
||||
}
|
||||
plog.Infof("data dir = %s", c.DataDir)
|
||||
plog.Infof("member dir = %s", c.MemberDir())
|
||||
if c.DedicatedWALDir != "" {
|
||||
plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
|
||||
}
|
||||
plog.Infof("heartbeat = %dms", c.TickMs)
|
||||
plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
|
||||
plog.Infof("snapshot count = %d", c.SnapCount)
|
||||
if len(c.DiscoveryURL) != 0 {
|
||||
plog.Infof("discovery URL= %s", c.DiscoveryURL)
|
||||
if len(c.DiscoveryProxy) != 0 {
|
||||
plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
|
||||
}
|
||||
}
|
||||
plog.Infof("advertise client URLs = %s", c.ClientURLs)
|
||||
if initial {
|
||||
plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
|
||||
plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
|
||||
}
|
||||
}
|
||||
|
||||
func checkDuplicateURL(urlsmap types.URLsMap) bool {
|
||||
um := make(map[string]bool)
|
||||
for _, urls := range urlsmap {
|
||||
for _, url := range urls {
|
||||
u := url.String()
|
||||
if um[u] {
|
||||
return true
|
||||
}
|
||||
um[u] = true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *ServerConfig) bootstrapTimeout() time.Duration {
|
||||
if c.BootstrapTimeout != 0 {
|
||||
return c.BootstrapTimeout
|
||||
}
|
||||
return time.Second
|
||||
}
|
||||
|
||||
func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
|
||||
Some files were not shown because too many files have changed in this diff.