forked from Ivasoft/traefik
Compare commits
43 Commits
v1.5.0-rc5 ... v1.5.2
| SHA1 |
|---|
| 52bad03c8d |
| 2fde3e8679 |
| 1e71f52b72 |
| 2b1d2853cd |
| f07e8f58e6 |
| 7b19cb5631 |
| dbd173b4e4 |
| 85cfd87c44 |
| c867f48f11 |
| 514f9a7215 |
| 0b0380b690 |
| 4d0c8c189a |
| afe4c307f9 |
| ce3a0fdd46 |
| 203a5c5c48 |
| be4aeaacde |
| 26dc2f4d61 |
| 6aac78fc36 |
| f6c53f0450 |
| 54e09b98c7 |
| 4eebaa1a80 |
| cb9bf3ce68 |
| 49a8cb76f5 |
| bf12306f17 |
| 323b8237a0 |
| 039ccaf4f1 |
| 4afb39778a |
| 751781a3b7 |
| f5d150c3b4 |
| ae9342208e |
| 3040d9df0d |
| 00e0571811 |
| bfb07746fe |
| 171cda6186 |
| 4cc17e112f |
| b6af61fa6e |
| 4e07d92190 |
| fc00e1c228 |
| ae34486b57 |
| d7b513e9aa |
| d8297a055a |
| ced5aa5dc6 |
| adfa3f795c |
.travis.yml (33 changes)
@@ -1,6 +1,9 @@
sudo: required
dist: trusty

git:
depth: false

services:
- docker

@@ -21,22 +24,16 @@ before_deploy:
sudo -E apt-get -yq update;
sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install docker-ce=${DOCKER_VERSION}*;
docker version;
pip install --user -r requirements.txt;
make -j${N_MAKE_JOBS} crossbinary-parallel;
make image-dirty;
mkdocs build --clean;
tar cfz dist/traefik-${VERSION}.src.tar.gz --exclude-vcs --exclude dist .;
make image;
if [ "$TRAVIS_TAG" ]; then
make -j${N_MAKE_JOBS} crossbinary-parallel;
tar cfz dist/traefik-${VERSION}.src.tar.gz --exclude-vcs --exclude dist .;
fi;
curl -sI https://github.com/containous/structor/releases/latest | grep -Fi Location | tr -d '\r' | sed "s/tag/download/g" | awk -F " " '{ print $2 "/structor_linux-amd64"}' | wget --output-document=$GOPATH/bin/structor -i -;
chmod +x $GOPATH/bin/structor;
structor -o containous -r traefik --dockerfile-url="https://raw.githubusercontent.com/containous/traefik/master/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/containous/structor/master/traefik-menu.js.gotmpl" --exp-branch=master --debug;
fi
deploy:
- provider: pages
edge: true
github_token: ${GITHUB_TOKEN}
local_dir: site
skip_cleanup: true
on:
repo: containous/traefik
tags: true
condition: ${TRAVIS_TAG} =~ ^v[0-9]+\.[0-9]+\.[0-9]+$
- provider: releases
api_key: ${GITHUB_TOKEN}
file: dist/traefik*
@@ -56,3 +53,11 @@ deploy:
skip_cleanup: true
on:
repo: containous/traefik
- provider: pages
edge: true
github_token: ${GITHUB_TOKEN}
local_dir: site
skip_cleanup: true
on:
repo: containous/traefik
all_branches: true
CHANGELOG.md (198 changes)
@@ -1,5 +1,203 @@
# Change Log

## [v1.5.2](https://github.com/containous/traefik/tree/v1.5.2) (2018-02-12)
[All Commits](https://github.com/containous/traefik/compare/v1.5.1...v1.5.2)

**Bug fixes:**
- **[acme,cluster,kv]** Compress ACME certificates in KV stores. ([#2814](https://github.com/containous/traefik/pull/2814) by [nmengin](https://github.com/nmengin))
- **[acme]** Traefik still start when Let's encrypt is down ([#2794](https://github.com/containous/traefik/pull/2794) by [Juliens](https://github.com/Juliens))
- **[docker]** Fix dnsrr endpoint mode excluded when not using swarm LB ([#2795](https://github.com/containous/traefik/pull/2795) by [mmatur](https://github.com/mmatur))
- **[eureka]** Continue refresh the configuration after a failure. ([#2838](https://github.com/containous/traefik/pull/2838) by [ldez](https://github.com/ldez))
- **[logs]** Reduce oxy round trip logs to debug. ([#2821](https://github.com/containous/traefik/pull/2821) by [timoreimann](https://github.com/timoreimann))
- **[websocket]** Fix goroutine leaks in websocket ([#2825](https://github.com/containous/traefik/pull/2825) by [Juliens](https://github.com/Juliens))
- Hide the pflag error when displaying help. ([#2800](https://github.com/containous/traefik/pull/2800) by [ldez](https://github.com/ldez))

**Documentation:**
- **[docker]** Explain how to write entrypoints definition in a compose file ([#2834](https://github.com/containous/traefik/pull/2834) by [mmatur](https://github.com/mmatur))
- **[docker]** Fix typo ([#2813](https://github.com/containous/traefik/pull/2813) by [uschtwill](https://github.com/uschtwill))
- **[k8s]** typo in "i"ngress annotations. ([#2780](https://github.com/containous/traefik/pull/2780) by [RRAlex](https://github.com/RRAlex))
- Clarify how setting a frontend priority works ([#2818](https://github.com/containous/traefik/pull/2818) by [sirlatrom](https://github.com/sirlatrom))
- Fixed typo. ([#2811](https://github.com/containous/traefik/pull/2811) by [sonus21](https://github.com/sonus21))
- Docs: regex+replacement hints for URL rewriting ([#2802](https://github.com/containous/traefik/pull/2802) by [djeeg](https://github.com/djeeg))
- Add documentation about entry points definition with CLI. ([#2798](https://github.com/containous/traefik/pull/2798) by [ldez](https://github.com/ldez))

## [v1.5.1](https://github.com/containous/traefik/tree/v1.5.1) (2018-01-29)
[All Commits](https://github.com/containous/traefik/compare/v1.5.0...v1.5.1)

**Bug fixes:**
- **[acme]** Handle undefined entrypoint on ACME config and frontend config ([#2756](https://github.com/containous/traefik/pull/2756) by [Juliens](https://github.com/Juliens))
- **[k8s]** Fix the k8s redirection template. ([#2748](https://github.com/containous/traefik/pull/2748) by [ldez](https://github.com/ldez))
- **[middleware]** Change gzipwriter receiver to implement CloseNotifier ([#2766](https://github.com/containous/traefik/pull/2766) by [Juliens](https://github.com/Juliens))
- **[tls]** Fix domain names in dynamic TLS configuration ([#2768](https://github.com/containous/traefik/pull/2768) by [nmengin](https://github.com/nmengin))

**Documentation:**
- **[acme]** Add note on redirect for ACME http challenge ([#2767](https://github.com/containous/traefik/pull/2767) by [Juliens](https://github.com/Juliens))
- **[file]** Enhance file provider documentation. ([#2777](https://github.com/containous/traefik/pull/2777) by [ldez](https://github.com/ldez))

## [v1.5.0](https://github.com/containous/traefik/tree/v1.5.0) (2018-01-23)
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc1...v1.5.0)

**Enhancements:**
- **[acme,tls]** Rename TLSConfigurations to TLS. ([#2744](https://github.com/containous/traefik/pull/2744) by [ldez](https://github.com/ldez))
- **[acme,provider,docker,tls]** Make the TLS certificates management dynamic. ([#2233](https://github.com/containous/traefik/pull/2233) by [nmengin](https://github.com/nmengin))
- **[acme]** Add Let's Encrypt HTTP Challenge ([#2701](https://github.com/containous/traefik/pull/2701) by [Juliens](https://github.com/Juliens))
- **[acme]** Update github.com/xenolf/lego to 0.4.1 ([#2304](https://github.com/containous/traefik/pull/2304) by [oldmantaiter](https://github.com/oldmantaiter))
- **[api,healthcheck,metrics,provider,webui]** Split Web into API/Dashboard, ping, metric and Rest Provider ([#2335](https://github.com/containous/traefik/pull/2335) by [Juliens](https://github.com/Juliens))
- **[authentication]** Pass through certain forward auth negative response headers ([#2127](https://github.com/containous/traefik/pull/2127) by [wheresmysocks](https://github.com/wheresmysocks))
- **[cluster,consul,file]** Add file to storeconfig ([#2419](https://github.com/containous/traefik/pull/2419) by [emilevauge](https://github.com/emilevauge))
- **[cluster,provider]** Support Etcd v3, enhance KV support ([#2407](https://github.com/containous/traefik/pull/2407) by [nmengin](https://github.com/nmengin))
- **[docker,k8s,rancher,webui]** Redirect to another entryPoint per frontend ([#2133](https://github.com/containous/traefik/pull/2133) by [SantoDE](https://github.com/SantoDE))
- **[docker,k8s,rancher]** Support regex redirect by frontend ([#2570](https://github.com/containous/traefik/pull/2570) by [ldez](https://github.com/ldez))
- **[docker]** Add Custom header parsing to Docker Provider ([#2030](https://github.com/containous/traefik/pull/2030) by [dtomcej](https://github.com/dtomcej))
- **[docker]** Docker labels ([#2473](https://github.com/containous/traefik/pull/2473) by [ldez](https://github.com/ldez))
- **[docker]** Add docker security headers via labels ([#2334](https://github.com/containous/traefik/pull/2334) by [dtomcej](https://github.com/dtomcej))
- **[docker]** Use Node IP in Swarm Standalone with "host" NetworkMode ([#2274](https://github.com/containous/traefik/pull/2274) by [BlakeMesdag](https://github.com/BlakeMesdag))
- **[ecs]** ECS provider refactoring ([#2050](https://github.com/containous/traefik/pull/2050) by [mmatur](https://github.com/mmatur))
- **[ecs]** Add health check label to ECS ([#2421](https://github.com/containous/traefik/pull/2421) by [oldmantaiter](https://github.com/oldmantaiter))
- **[ecs]** Support Host NetworkMode for ECS provider ([#2320](https://github.com/containous/traefik/pull/2320) by [FriggaHel](https://github.com/FriggaHel))
- **[etcd]** Manage certificates dynamically in kv store ([#2411](https://github.com/containous/traefik/pull/2411) by [dahefanteng](https://github.com/dahefanteng))
- **[healthcheck]** Use health check for systemd watchdog ([#2283](https://github.com/containous/traefik/pull/2283) by [guilhem](https://github.com/guilhem))
- **[k8s]** Kubernetes security header annotations ([#2460](https://github.com/containous/traefik/pull/2460) by [dtomcej](https://github.com/dtomcej))
- **[k8s]** Add labels for `traefik.frontend.entryPoints` & `PassTLSCert` to Kubernetes ([#2324](https://github.com/containous/traefik/pull/2324) by [ryarnyah](https://github.com/ryarnyah))
- **[k8s]** Only listen to configured k8s namespaces. ([#1895](https://github.com/containous/traefik/pull/1895) by [timoreimann](https://github.com/timoreimann))
- **[logs,middleware,consul,docker]** Use constants from http package. ([#2425](https://github.com/containous/traefik/pull/2425) by [ldez](https://github.com/ldez))
- **[logs]** Add json format support for Traefik logs ([#2056](https://github.com/containous/traefik/pull/2056) by [marco-jantke](https://github.com/marco-jantke))
- **[marathon]** Marathon constraints filtering ([#2388](https://github.com/containous/traefik/pull/2388) by [aantono](https://github.com/aantono))
- **[marathon]** Remove unused lightMarathonClient. ([#2383](https://github.com/containous/traefik/pull/2383) by [timoreimann](https://github.com/timoreimann))
- **[metrics]** Add InfluxDB support for traefik metrics ([#2289](https://github.com/containous/traefik/pull/2289) by [adityacs](https://github.com/adityacs))
- **[middleware]** Added ReplacePathRegex middleware ([#2033](https://github.com/containous/traefik/pull/2033) by [Tiscs](https://github.com/Tiscs))
- **[middleware]** Fix custom headers replacement ([#2455](https://github.com/containous/traefik/pull/2455) by [mmatur](https://github.com/mmatur))
- **[oxy]** Resync oxy with original repository ([#2451](https://github.com/containous/traefik/pull/2451) by [Juliens](https://github.com/Juliens))
- **[provider]** Support template as raw string. ([#2413](https://github.com/containous/traefik/pull/2413) by [ldez](https://github.com/ldez))
- **[rancher]** Run Rancher tests cases in parallel. ([#2424](https://github.com/containous/traefik/pull/2424) by [ldez](https://github.com/ldez))
- **[rancher]** Update Rancher API integration to go-rancher client v2. ([#2291](https://github.com/containous/traefik/pull/2291) by [rawmind0](https://github.com/rawmind0))
- **[servicefabric]** Add Service Fabric Provider ([#2117](https://github.com/containous/traefik/pull/2117) by [lawrencegripper](https://github.com/lawrencegripper))
- **[tls]** Allow adding optional Client CA files ([#2306](https://github.com/containous/traefik/pull/2306) by [nmengin](https://github.com/nmengin))
- **[websocket]** Add tests for websocket headers ([#2379](https://github.com/containous/traefik/pull/2379) by [Juliens](https://github.com/Juliens))
- Upgrade libkermit/compose version ([#2071](https://github.com/containous/traefik/pull/2071) by [nmengin](https://github.com/nmengin))
- Add proxy protocol tests ([#2325](https://github.com/containous/traefik/pull/2325) by [emilevauge](https://github.com/emilevauge))
- Register pprof handlers. ([#2428](https://github.com/containous/traefik/pull/2428) by [timoreimann](https://github.com/timoreimann))
- Rate limiting for frontends ([#2034](https://github.com/containous/traefik/pull/2034) by [bparli](https://github.com/bparli))
- Stats collection. ([#2447](https://github.com/containous/traefik/pull/2447) by [ldez](https://github.com/ldez))
- Add request accepting grace period delaying graceful shutdown. ([#1971](https://github.com/containous/traefik/pull/1971) by [timoreimann](https://github.com/timoreimann))
- Put subcommand in dedicated files. ([#2265](https://github.com/containous/traefik/pull/2265) by [ldez](https://github.com/ldez))

**Bug fixes:**
- **[acme,docker]** Modify ACME configuration migration into KV store ([#2598](https://github.com/containous/traefik/pull/2598) by [nmengin](https://github.com/nmengin))
- **[acme,logs]** Modify DEBUG messages to get ACME certificates ([#2685](https://github.com/containous/traefik/pull/2685) by [nmengin](https://github.com/nmengin))
- **[acme]** Modify the ACME renewing logs level ([#2520](https://github.com/containous/traefik/pull/2520) by [nmengin](https://github.com/nmengin))
- **[acme]** ACME and corporate proxy. ([#2738](https://github.com/containous/traefik/pull/2738) by [ldez](https://github.com/ldez))
- **[acme]** Challenge HTTP must ignore deprecated web.path option ([#2719](https://github.com/containous/traefik/pull/2719) by [Juliens](https://github.com/Juliens))
- **[api]** Fix pprof route order. ([#2523](https://github.com/containous/traefik/pull/2523) by [timoreimann](https://github.com/timoreimann))
- **[authentication,middleware]** Fix concurrent map writes on digest auth ([#2695](https://github.com/containous/traefik/pull/2695) by [mmatur](https://github.com/mmatur))
- **[consulcatalog]** Use prefix for sticky and stickiness tags. ([#2624](https://github.com/containous/traefik/pull/2624) by [ldez](https://github.com/ldez))
- **[consulcatalog]** Fix bad Træfik update on Consul Catalog ([#2573](https://github.com/containous/traefik/pull/2573) by [mmatur](https://github.com/mmatur))
- **[consulcatalog]** Reload configuration when port change for one service ([#2574](https://github.com/containous/traefik/pull/2574) by [mmatur](https://github.com/mmatur))
- **[docker,k8s]** Fix Labels/annotation logs and values. ([#2488](https://github.com/containous/traefik/pull/2488) by [ldez](https://github.com/ldez))
- **[docker,k8s]** Change custom headers separator ([#2509](https://github.com/containous/traefik/pull/2509) by [ldez](https://github.com/ldez))
- **[docker]** Fix empty IP for backend when dnsrr in Docker swarm mode ([#2490](https://github.com/containous/traefik/pull/2490) by [mmatur](https://github.com/mmatur))
- **[docker]** Quote template strings ([#2496](https://github.com/containous/traefik/pull/2496) by [dtomcej](https://github.com/dtomcej))
- **[docker]** Return errors from Docker client.Events ([#2689](https://github.com/containous/traefik/pull/2689) by [BlakeMesdag](https://github.com/BlakeMesdag))
- **[docker]** Typo in Docker template. ([#2692](https://github.com/containous/traefik/pull/2692) by [ldez](https://github.com/ldez))
- **[ecs]** Add missing functions for ECS template ([#2312](https://github.com/containous/traefik/pull/2312) by [oldmantaiter](https://github.com/oldmantaiter))
- **[file,tls]** Send empty configuration from file provider ([#2609](https://github.com/containous/traefik/pull/2609) by [nmengin](https://github.com/nmengin))
- **[healthcheck]** Fix health check when web is not specified ([#2529](https://github.com/containous/traefik/pull/2529) by [Juliens](https://github.com/Juliens))
- **[k8s]** Reduce logs with new Kubernetes security annotations ([#2506](https://github.com/containous/traefik/pull/2506) by [ldez](https://github.com/ldez))
- **[k8s]** Add missing entry points template. ([#2594](https://github.com/containous/traefik/pull/2594) by [ldez](https://github.com/ldez))
- **[kv]** Fix stickiness bug due to template syntax error ([#2591](https://github.com/containous/traefik/pull/2591) by [dahefanteng](https://github.com/dahefanteng))
- **[kv]** List entries parsing. ([#2669](https://github.com/containous/traefik/pull/2669) by [ldez](https://github.com/ldez))
- **[logs]** Fix traefik logs to behave like configured ([#2176](https://github.com/containous/traefik/pull/2176) by [marco-jantke](https://github.com/marco-jantke))
- **[marathon]** Update go-marathon ([#2585](https://github.com/containous/traefik/pull/2585) by [timoreimann](https://github.com/timoreimann))
- **[mesos]** Mesos: Use slave.PID.Host as task SlaveIP. ([#2590](https://github.com/containous/traefik/pull/2590) by [nemosupremo](https://github.com/nemosupremo))
- **[metrics]** Fix breaking change in web metrics ([#2725](https://github.com/containous/traefik/pull/2725) by [Juliens](https://github.com/Juliens))
- **[metrics]** Do not ignore web params when web.metrics.prometheus is set ([#2499](https://github.com/containous/traefik/pull/2499) by [Juliens](https://github.com/Juliens))
- **[metrics]** Fix metrics problem on multiple entrypoints ([#2492](https://github.com/containous/traefik/pull/2492) by [Juliens](https://github.com/Juliens))
- **[metrics]** Fix data races. ([#2287](https://github.com/containous/traefik/pull/2287) by [tcolgate](https://github.com/tcolgate))
- **[metrics]** Flaky test Influxdb. ([#2386](https://github.com/containous/traefik/pull/2386) by [ldez](https://github.com/ldez))
- **[middleware,docker,k8s]** Fix custom headers template ([#2621](https://github.com/containous/traefik/pull/2621) by [ldez](https://github.com/ldez))
- **[middleware]** Don't panic if ResponseWriter does not implement CloseNotify ([#2651](https://github.com/containous/traefik/pull/2651) by [Juliens](https://github.com/Juliens))
- **[middleware]** GzipResponse must implement CloseNotifier if ResponseWriter implement it ([#2657](https://github.com/containous/traefik/pull/2657) by [Juliens](https://github.com/Juliens))
- **[middleware]** Fix RawPath handling in addPrefix ([#2560](https://github.com/containous/traefik/pull/2560) by [risdenk](https://github.com/risdenk))
- **[middleware]** We need to flush the end of the body when retry is streamed ([#2644](https://github.com/containous/traefik/pull/2644) by [Juliens](https://github.com/Juliens))
- **[provider]** Fix typo in frontend.headers.customresponseheaders label ([#2356](https://github.com/containous/traefik/pull/2356) by [nmandery](https://github.com/nmandery))
- **[provider]** Fix concurrent provider config reloads ([#2276](https://github.com/containous/traefik/pull/2276) by [marco-jantke](https://github.com/marco-jantke))
- **[rancher]** Don't reload configuration when rancher server is down ([#2706](https://github.com/containous/traefik/pull/2706) by [wacken89](https://github.com/wacken89))
- **[rules]** Add non regex pathPrefix ([#2592](https://github.com/containous/traefik/pull/2592) by [emilevauge](https://github.com/emilevauge))
- **[servicefabric]** Fix backend name for Stateful services. (Service Fabric) ([#2559](https://github.com/containous/traefik/pull/2559) by [ldez](https://github.com/ldez))
- **[servicefabric]** Fix isHealthy logic. ([#2577](https://github.com/containous/traefik/pull/2577) by [ldez](https://github.com/ldez))
- **[servicefabric]** Service Fabric 'expose' as boolean. ([#2476](https://github.com/containous/traefik/pull/2476) by [ldez](https://github.com/ldez))
- **[tls]** Allow deleting dynamically all TLS certificates from an entryPoint ([#2603](https://github.com/containous/traefik/pull/2603) by [nmengin](https://github.com/nmengin))
- **[websocket]** Disable websocket compression ([#2727](https://github.com/containous/traefik/pull/2727) by [Juliens](https://github.com/Juliens))
- **[websocket]** Add compression and better error handling ([#2702](https://github.com/containous/traefik/pull/2702) by [Juliens](https://github.com/Juliens))
- **[websocket]** Use gorilla readMessage and writeMessage instead of just an io.Copy ([#2650](https://github.com/containous/traefik/pull/2650) by [Juliens](https://github.com/Juliens))
- **[websocket]** RawPath and Transfer TLSConfig in websocket ([#2077](https://github.com/containous/traefik/pull/2077) by [Juliens](https://github.com/Juliens))
- **[zk]** Change Zookeeper default prefix. ([#2580](https://github.com/containous/traefik/pull/2580) by [ldez](https://github.com/ldez))
- Fix wrong default entry point and non-existing entry point issue ([#2501](https://github.com/containous/traefik/pull/2501) by [Juliens](https://github.com/Juliens))
- Fix goroutine leak in throttler logic. ([#2739](https://github.com/containous/traefik/pull/2739) by [timoreimann](https://github.com/timoreimann))
- Fix timeout integration test ([#2679](https://github.com/containous/traefik/pull/2679) by [ldez](https://github.com/ldez))
- Fix frontend redirect ([#2544](https://github.com/containous/traefik/pull/2544) by [ldez](https://github.com/ldez))
- Close ring buffer used in throttling function. ([#2532](https://github.com/containous/traefik/pull/2532) by [timoreimann](https://github.com/timoreimann))

**Documentation:**
- **[acme]** Improve documentation for Cloudflare API key ([#2558](https://github.com/containous/traefik/pull/2558) by [mmatur](https://github.com/mmatur))
- **[acme]** Update Let's Encrypt provider list ([#2347](https://github.com/containous/traefik/pull/2347) by [mmatur](https://github.com/mmatur))
- **[cluster]** Add a clustering example with Docker Swarm ([#2589](https://github.com/containous/traefik/pull/2589) by [jmaitrehenry](https://github.com/jmaitrehenry))
- **[consul,consulcatalog]** Split Consul and Consul Catalog documentation ([#2654](https://github.com/containous/traefik/pull/2654) by [ldez](https://github.com/ldez))
- **[consul]** Improve Consul documentation ([#2485](https://github.com/containous/traefik/pull/2485) by [mmatur](https://github.com/mmatur))
- **[docker/swarm]** Typo in docker.endpoint TCP port. ([#2626](https://github.com/containous/traefik/pull/2626) by [redhandpl](https://github.com/redhandpl))
- **[docker]** Fix Docker labels documentation render. ([#2505](https://github.com/containous/traefik/pull/2505) by [ldez](https://github.com/ldez))
- **[docker]** Add a note on how to add label to a docker compose file ([#2611](https://github.com/containous/traefik/pull/2611) by [jmaitrehenry](https://github.com/jmaitrehenry))
- **[etcd]** Fix typo in examples ([#2446](https://github.com/containous/traefik/pull/2446) by [dahefanteng](https://github.com/dahefanteng))
- **[k8s]** Add note to Kubernetes RBAC docs about RoleBindings and namespaces ([#2498](https://github.com/containous/traefik/pull/2498) by [jmara](https://github.com/jmara))
- **[k8s]** k8s guide: Leave note about assumed DaemonSet usage. ([#2634](https://github.com/containous/traefik/pull/2634) by [timoreimann](https://github.com/timoreimann))
- **[k8s]** Apply various contentual and stylish improvements to the k8s docs. ([#2677](https://github.com/containous/traefik/pull/2677) by [timoreimann](https://github.com/timoreimann))
- **[k8s]** Document rewrite-target annotation. ([#2676](https://github.com/containous/traefik/pull/2676) by [timoreimann](https://github.com/timoreimann))
- **[k8s]** Remove obsolete links in k8s docs ([#2465](https://github.com/containous/traefik/pull/2465) by [marco-jantke](https://github.com/marco-jantke))
- **[k8s]** Document filename parameter for Kubernetes. ([#2464](https://github.com/containous/traefik/pull/2464) by [timoreimann](https://github.com/timoreimann))
- **[marathon]** Improve Marathon service label documentation. ([#2635](https://github.com/containous/traefik/pull/2635) by [timoreimann](https://github.com/timoreimann))
- **[metrics]** Add entrypoint in Prometheus doc and remove web on Influxdb doc ([#2452](https://github.com/containous/traefik/pull/2452) by [Juliens](https://github.com/Juliens))
- **[provider,webui]** Fix redirect problem on dashboard + docs/tests on [web] ([#2686](https://github.com/containous/traefik/pull/2686) by [Juliens](https://github.com/Juliens))
- **[servicefabric]** Describe 'refreshSecond' configuration. ([#2471](https://github.com/containous/traefik/pull/2471) by [ldez](https://github.com/ldez))
- **[tls]** Fix doc dynamic certificates ([#2737](https://github.com/containous/traefik/pull/2737) by [emilevauge](https://github.com/emilevauge))
- **[tls]** Add link to crypto/tls godoc. ([#2470](https://github.com/containous/traefik/pull/2470) by [ldez](https://github.com/ldez))
- Move rate limit documentation. ([#2588](https://github.com/containous/traefik/pull/2588) by [ldez](https://github.com/ldez))
- Grammar ([#2562](https://github.com/containous/traefik/pull/2562) by [geraldcroes](https://github.com/geraldcroes))
- Fix some doc links ([#2731](https://github.com/containous/traefik/pull/2731) by [eldondev](https://github.com/eldondev))
- Fix broken links and improve ResponseCodeRatio() description ([#2538](https://github.com/containous/traefik/pull/2538) by [mvasin](https://github.com/mvasin))
- Fix typo in anonymous usage log message. ([#2711](https://github.com/containous/traefik/pull/2711) by [Yggdrasil](https://github.com/Yggdrasil))
- Fix typos in changelog ([#2387](https://github.com/containous/traefik/pull/2387) by [ferhatelmas](https://github.com/ferhatelmas))
- Add mmatur to maintainers ([#2303](https://github.com/containous/traefik/pull/2303) by [emilevauge](https://github.com/emilevauge))
- Add a note about redirection rule to precise how regex/replacement work. ([#2243](https://github.com/containous/traefik/pull/2243) by [nmengin](https://github.com/nmengin))
- Add docker things for documentation ([#2020](https://github.com/containous/traefik/pull/2020) by [tcoupin](https://github.com/tcoupin))
- Prepare release v1.5.0-rc5 ([#2707](https://github.com/containous/traefik/pull/2707) by [mmatur](https://github.com/mmatur))
- Prepare release v1.5.0-rc4 ([#2656](https://github.com/containous/traefik/pull/2656) by [Juliens](https://github.com/Juliens))
- Prepare release v1.5.0-rc3 ([#2599](https://github.com/containous/traefik/pull/2599) by [ldez](https://github.com/ldez))
- Prepare release v1.5.0-rc2 ([#2533](https://github.com/containous/traefik/pull/2533) by [ldez](https://github.com/ldez))
- Prepare release v1.5.0-rc1 ([#2480](https://github.com/containous/traefik/pull/2480) by [ldez](https://github.com/ldez))

**Misc:**
- **[acme]** dumpcerts.sh: Fix call to "base64" for Alpine ([#2344](https://github.com/containous/traefik/pull/2344) by [nknapp](https://github.com/nknapp))
- **[acme]** dumpcerts.sh: fixed sed, extracted domain keys ([#2161](https://github.com/containous/traefik/pull/2161) by [sjawhar](https://github.com/sjawhar))
- **[etcd,kv,tls]** Add tests for TLS dynamic configuration in ETCD3 ([#2606](https://github.com/containous/traefik/pull/2606) by [dahefanteng](https://github.com/dahefanteng))
- Upgrade libkermit/compose version ([#2074](https://github.com/containous/traefik/pull/2074) by [nmengin](https://github.com/nmengin))
- Merge v1.4.6 into v1.5 ([#2642](https://github.com/containous/traefik/pull/2642) by [ldez](https://github.com/ldez))
- Merge v1.4.5 into v1.5 ([#2530](https://github.com/containous/traefik/pull/2530) by [mmatur](https://github.com/mmatur))
- Merge current v1.4 into master ([#2479](https://github.com/containous/traefik/pull/2479) by [ldez](https://github.com/ldez))
- Merge v1.4.3 into master ([#2415](https://github.com/containous/traefik/pull/2415) by [ldez](https://github.com/ldez))
- Merge v1.4.4 into master ([#2457](https://github.com/containous/traefik/pull/2457) by [ldez](https://github.com/ldez))
- Merge v1.4.3 into master ([#2406](https://github.com/containous/traefik/pull/2406) by [ldez](https://github.com/ldez))
- Revert "Merge v1.4.2 into master" ([#2414](https://github.com/containous/traefik/pull/2414) by [ldez](https://github.com/ldez))
- Merge v1.4.2 into master ([#2358](https://github.com/containous/traefik/pull/2358) by [ldez](https://github.com/ldez))
- Merge v1.4.1 into master ([#2318](https://github.com/containous/traefik/pull/2318) by [ldez](https://github.com/ldez))
- Merge v1.4.0 ([#2271](https://github.com/containous/traefik/pull/2271) by [ldez](https://github.com/ldez))
- Merge v1.4.0-rc5 into master ([#2242](https://github.com/containous/traefik/pull/2242) by [ldez](https://github.com/ldez))
- Merge v1.4.0-rc4 into master ([#2202](https://github.com/containous/traefik/pull/2202) by [ldez](https://github.com/ldez))
- Merge current v1.4 into master ([#2469](https://github.com/containous/traefik/pull/2469) by [ldez](https://github.com/ldez))
- Merge current v1.4 ([#2154](https://github.com/containous/traefik/pull/2154) by [ldez](https://github.com/ldez))
- Merge v1.4.0-rc3 into master ([#2140](https://github.com/containous/traefik/pull/2140) by [ldez](https://github.com/ldez))
- Merge v1.4.0-rc2 into master ([#2092](https://github.com/containous/traefik/pull/2092) by [ldez](https://github.com/ldez))
- Merge current 1.4 ([#2064](https://github.com/containous/traefik/pull/2064) by [ldez](https://github.com/ldez))

## [v1.5.0-rc5](https://github.com/containous/traefik/tree/v1.5.0-rc5) (2018-01-15)
[All Commits](https://github.com/containous/traefik/compare/v1.5.0-rc4...v1.5.0-rc5)
@@ -64,7 +64,7 @@ Once your environment is set up and the Træfik repository cloned you can build
cd ~/go/src/github.com/containous/traefik

# Get go-bindata. Please note, the ellipses are required
go get github.com/jteeuwen/go-bindata/...
go get github.com/containous/go-bindata/...

# Start build

@@ -87,9 +87,11 @@ If you happen to update the provider templates (in `/templates`), you need to ru

[dep](https://github.com/golang/dep) is not required for building; however, it is necessary to modify dependencies (i.e., add, update, or remove third-party packages).

You need to use [dep](https://github.com/golang/dep) >= 0.4.1.

If you want to add a dependency, use `dep ensure -add` to have [dep](https://github.com/golang/dep) put it into the vendor folder and update the dep manifest/lock files (`Gopkg.toml` and `Gopkg.lock`, respectively).

A following `make prune-dep` run should be triggered to trim down the size of the vendor folder.
A following `make dep-prune` run should be triggered to trim down the size of the vendor folder.
The final result must be committed into VCS.

Here's a full example using dep to add a new dependency:
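The example itself is cut off by the diff hunk above; a minimal sketch of the workflow just described, using a hypothetical package path, could look like this:

```bash
# Hypothetical example (github.com/foo/bar is illustrative, not from the diff above).
# Let dep add the dependency to Gopkg.toml/Gopkg.lock and the vendor folder.
dep ensure -add github.com/foo/bar

# Trim down the vendor folder, as described above.
make dep-prune

# Commit the result into VCS.
git add Gopkg.toml Gopkg.lock vendor/
git commit -m "Add github.com/foo/bar to dependencies"
```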
Gopkg.lock (11 changes, generated)
@@ -89,7 +89,7 @@
branch = "master"
name = "github.com/NYTimes/gziphandler"
packages = ["."]
revision = "47ca22a0aeea4c9ceddfb935d818d636d934c312"
revision = "289a3b81f5aedc99f8d6eb0f67827c142f1310d8"

[[projects]]
name = "github.com/Nvveen/Gotty"
@@ -223,8 +223,8 @@
[[projects]]
name = "github.com/containous/staert"
packages = ["."]
revision = "af517d5b70db9c4b0505e0144fcc62b054057d2a"
version = "v2.0.0"
revision = "68c67b32c3a986672d994d38127cd5c78d53eb26"
version = "v2.1.0"

[[projects]]
name = "github.com/containous/traefik-extra-service-fabric"
@@ -962,7 +962,8 @@
"mock",
"require"
]
revision = "4d4bfba8f1d1027c4fdbe371823030df51419987"
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"

[[projects]]
branch = "master"
@@ -1027,7 +1028,7 @@
"roundrobin",
"utils"
]
revision = "fd0f370c961f6aa304379f4106e76ffe5ed7e97a"
revision = "af377749f48ff0ae9974b30ce12a816738b94558"
source = "https://github.com/containous/oxy.git"

[[projects]]
@@ -64,7 +64,7 @@ ignored = ["github.com/sirupsen/logrus"]

[[constraint]]
name = "github.com/containous/staert"
version = "2.0.0"
version = "2.1.0"

[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"
@@ -190,3 +190,8 @@ ignored = ["github.com/sirupsen/logrus"]
# remove override on master
name = "github.com/coreos/bbolt"
revision = "32c383e75ce054674c53b5a07e55de85332aee14"

[prune]
non-go = true
go-tests = true
unused-packages = true
Makefile (6 changes)
@@ -127,7 +127,11 @@ fmt:
pull-images:
grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull

prune-dep:
dep-ensure:
dep ensure -v
./script/prune-dep.sh

dep-prune:
./script/prune-dep.sh

help: ## this help
acme/acme.go (20 changes)
@@ -114,6 +114,20 @@ type Domain struct {
}

func (a *ACME) init() error {
// FIXME temporary fix, waiting for https://github.com/xenolf/lego/pull/478
acme.HTTPClient = http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 15 * time.Second,
ResponseHeaderTimeout: 15 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
}

if a.ACMELogging {
acme.Logger = fmtlog.New(os.Stderr, "legolog: ", fmtlog.LstdFlags)
} else {
@@ -281,6 +295,7 @@ func (a *ACME) leadershipListener(elected bool) error {

// CreateLocalConfig creates a tls.config using local ACME configuration
func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkOnDemandDomain func(domain string) bool) error {
defer a.runJobs()
err := a.init()
if err != nil {
return err
@@ -319,7 +334,9 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkO

a.client, err = a.buildACMEClient(account)
if err != nil {
return err
log.Errorf(`Failed to build ACME client: %s
Let's Encrypt functionality will be limited until traefik is restarted.`, err)
return nil
}

if needRegister {
@@ -360,7 +377,6 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkO

a.retrieveCertificates()
a.renewCertificates()
a.runJobs()

ticker := time.NewTicker(24 * time.Hour)
safe.Go(func() {
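Because the ACME HTTP client above uses `http.ProxyFromEnvironment`, corporate proxy settings are picked up from the standard Go proxy environment variables. A hedged sketch (the proxy address is a placeholder, not taken from the diff):

```bash
# Placeholder proxy address; adjust to your environment.
export HTTP_PROXY="http://proxy.internal:3128"
export HTTPS_PROXY="http://proxy.internal:3128"
export NO_PROXY="localhost,127.0.0.1"

# Traefik (and its ACME client) will now reach Let's Encrypt through the proxy.
./traefik --configFile=traefik.toml
```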
@@ -441,9 +441,9 @@ var _templatesKubernetesTmpl = []byte(`[backends]{{range $backendName, $backend

{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
entryPoint = "{{$frontend.Redirect.EntryPoint}}"
regex = "{{$frontend.Redirect.Regex}}"
replacement = "{{$frontend.Redirect.Replacement}}"
{{end}}

{{ if $frontend.Headers }}
@@ -522,7 +522,7 @@ func templatesKubernetesTmpl() (*asset, error) {

var _templatesKvTmpl = []byte(`{{$frontends := List .Prefix "/frontends/" }}
{{$backends := List .Prefix "/backends/"}}
{{$tlsconfiguration := List .Prefix "/tlsconfiguration/"}}
{{$tls := List .Prefix "/tls/"}}

[backends]{{range $backends}}
{{$backend := .}}
@@ -587,13 +587,13 @@ var _templatesKvTmpl = []byte(`{{$frontends := List .Prefix "/frontends/" }}
{{end}}
{{end}}

{{range $tlsconfiguration}}
{{range $tls}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[[tlsConfiguration]]
[[tls]]
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
[tls.certificate]
certFile = """{{Get "" . "/certificate" "/certfile"}}"""
keyFile = """{{Get "" . "/certificate" "/keyfile"}}"""
{{end}}
@@ -4,22 +4,20 @@ RUN apk --update upgrade \
&& apk --no-cache --no-progress add git mercurial bash gcc musl-dev curl tar \
&& rm -rf /var/cache/apk/*

RUN go get github.com/jteeuwen/go-bindata/... \
RUN go get github.com/containous/go-bindata/... \
&& go get github.com/golang/lint/golint \
&& go get github.com/kisielk/errcheck \
&& go get github.com/client9/misspell/cmd/misspell

# Which docker version to test on
ARG DOCKER_VERSION=17.03.2
ARG DEP_VERSION=0.3.2
ARG DEP_VERSION=0.4.1

# Download dep binary to bin folder in $GOPATH
RUN mkdir -p /usr/local/bin \
&& curl -fsSL -o /usr/local/bin/dep https://github.com/golang/dep/releases/download/v${DEP_VERSION}/dep-linux-amd64 \
&& chmod +x /usr/local/bin/dep

# Download docker
RUN mkdir -p /usr/local/bin \
&& curl -fL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}-ce.tgz \
@@ -61,7 +61,8 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
// TODO: Deprecated - default Metrics
defaultWeb.Metrics = &types.Metrics{
Prometheus: &types.Prometheus{
Buckets: types.Buckets{0.1, 0.3, 1.2, 5},
Buckets: types.Buckets{0.1, 0.3, 1.2, 5},
EntryPoint: configuration.DefaultInternalEntryPointName,
},
Datadog: &types.Datadog{
Address: "localhost:8125",
@@ -220,7 +221,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
defaultMetrics := types.Metrics{
Prometheus: &types.Prometheus{
Buckets: types.Buckets{0.1, 0.3, 1.2, 5},
EntryPoint: "traefik",
EntryPoint: configuration.DefaultInternalEntryPointName,
},
Datadog: &types.Datadog{
Address: "localhost:8125",
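For orientation, a hedged sketch of the standalone `[metrics]` section these defaults correspond to (assumed Traefik 1.5 syntax; the values simply mirror the defaults in the code above):

```toml
# Sketch only: the equivalent of the default Prometheus metrics settings above.
[metrics]
  [metrics.prometheus]
  # "traefik" is the default internal entry point referenced by
  # configuration.DefaultInternalEntryPointName in the code above.
  entryPoint = "traefik"
  buckets = [0.1, 0.3, 1.2, 5.0]
```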
@@ -28,6 +28,7 @@ import (
"github.com/containous/traefik/types"
"github.com/containous/traefik/version"
"github.com/coreos/go-systemd/daemon"
"github.com/ogier/pflag"
)

func main() {
@@ -75,6 +76,9 @@ Complete documentation is available at https://traefik.io`,
}

if _, err := f.Parse(usedCmd); err != nil {
if err == pflag.ErrHelp {
os.Exit(0)
}
fmtlog.Printf("Error parsing command: %s\n", err)
os.Exit(-1)
}
@@ -142,6 +146,7 @@ func run(globalConfiguration *configuration.GlobalConfiguration, configFile stri
http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment

globalConfiguration.SetEffectiveConfiguration(configFile)
globalConfiguration.ValidateConfiguration()

jsonConf, _ := json.Marshal(globalConfiguration)
log.Infof("Traefik version %s built on %s", version.Version, version.BuildDate)
@@ -261,14 +266,14 @@ func stats(globalConfiguration *configuration.GlobalConfiguration) {
Stats collection is enabled.
Many thanks for contributing to Traefik's improvement by allowing us to receive anonymous information from your configuration.
Help us improve Traefik by leaving this feature on :)
More details on: https://docs.traefik.io/basic/#collected-data
More details on: https://docs.traefik.io/basics/#collected-data
`)
collect(globalConfiguration)
} else {
log.Info(`
Stats collection is disabled.
Help us improve Traefik by turning this feature on :)
More details on: https://docs.traefik.io/basic/#collected-data
More details on: https://docs.traefik.io/basics/#collected-data
`)
}
}
@@ -259,6 +259,19 @@ func (gc *GlobalConfiguration) SetEffectiveConfiguration(configFile string) {
}
}

// ValidateConfiguration validate that configuration is coherent
func (gc *GlobalConfiguration) ValidateConfiguration() {
if gc.ACME != nil {
if _, ok := gc.EntryPoints[gc.ACME.EntryPoint]; !ok {
log.Fatalf("Unknown entrypoint %q for ACME configuration", gc.ACME.EntryPoint)
} else {
if gc.EntryPoints[gc.ACME.EntryPoint].TLS == nil {
log.Fatalf("Entrypoint without TLS %q for ACME configuration", gc.ACME.EntryPoint)
}
}
}
}

// DefaultEntryPoints holds default entry points
type DefaultEntryPoints []string
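To illustrate what the new `ValidateConfiguration` check expects, here is a hedged sketch of a configuration that passes it: the ACME entry point must exist and must have TLS enabled (entry point names, addresses, and the e-mail are illustrative):

```toml
defaultEntryPoints = ["http", "https"]

[entryPoints]
  [entryPoints.http]
  address = ":80"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]

[acme]
email = "admin@example.com"    # illustrative value
storage = "acme.json"
# Must match an entry point defined above *and* that entry point must have TLS,
# otherwise ValidateConfiguration calls log.Fatalf at startup.
entryPoint = "https"
```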
@@ -1,23 +0,0 @@
## Current versions documentation

- [Latest stable](https://docs.traefik.io)

## Future version documentation

- [Experimental](https://master--traefik-docs.netlify.com/)

## Previous versions documentation

- [v1.5 aka Cancoillotte](http://v1-5.archive.docs.traefik.io/)

- [v1.4 aka Roquefort](http://v1-4.archive.docs.traefik.io/)

- [v1.3 aka Raclette](http://v1-3.archive.docs.traefik.io/)

- [v1.2 aka Morbier](http://v1-2.archive.docs.traefik.io/)

- [v1.1 aka Camembert](http://v1-1.archive.docs.traefik.io/)

## More

[Change log](https://github.com/containous/traefik/blob/master/CHANGELOG.md)
@@ -236,7 +236,7 @@ The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portio
By default, routes will be sorted (in descending order) using rules length (to avoid path overlap):
`PathPrefix:/12345` will be matched before `PathPrefix:/1234` that will be matched before `PathPrefix:/1`.

You can customize priority by frontend:
You can customize priority by frontend. The priority value is added to the rule length during sorting:

```toml
[frontends]
@@ -254,7 +254,7 @@ You can customize priority by frontend:
rule = "PathPrefix:/toto"
```

Here, `frontend1` will be matched before `frontend2` (`10 > 5`).
Here, `frontend1` will be matched before `frontend2` (`(3 + 10 == 13) > (4 + 5 == 9)`).

#### Custom headers

@@ -612,6 +612,7 @@ Those data help us prioritize our developments and focus on what's more importan
### What ?

Once a day (the first call begins 10 minutes after the start of Træfik), we collect:

- the Træfik version
- a hash of the configuration
- an **anonymous version** of the static configuration:
@@ -144,6 +144,18 @@ entryPoint = "https"
If the `HTTP-01` challenge is used, `acme.httpChallenge.entryPoint` has to be defined and reachable by Let's Encrypt through port 80.
These are Let's Encrypt limitations as described on the [community forum](https://community.letsencrypt.org/t/support-for-ports-other-than-80-and-443/3419/72).

### Let's Encrypt downtime

Let's Encrypt functionality will be limited until Træfik is restarted.

If Let's Encrypt is not reachable, these certificates will be used:
- ACME certificates already generated before downtime
- Expired ACME certificates
- Provided certificates

!!! note
Default Træfik certificate will be used instead of ACME certificates for new (sub)domains (which need Let's Encrypt challenge).

### `storage`

```toml
@@ -153,27 +165,13 @@ storage = "acme.json"
# ...
```

File or key used for certificates storage.
The `storage` option sets where your ACME certificates are stored.

**WARNING:** If you use Træfik in Docker, you have 2 options:
There are two kinds of `storage`:
- a JSON file,
- a KV store entry.

- create a file on your host and mount it as a volume:
```toml
storage = "acme.json"
```
```bash
docker run -v "/my/host/acme.json:acme.json" traefik
```

- mount the folder containing the file as a volume
```toml
storage = "/etc/traefik/acme/acme.json"
```
```bash
docker run -v "/my/host/acme:/etc/traefik/acme" traefik
```

!!! note
!!! danger "DEPRECATED"
`storage` replaces `storageFile`, which is deprecated.

!!! note
@@ -182,10 +180,53 @@ docker run -v "/my/host/acme:/etc/traefik/acme" traefik
- `storageFile` will contain the path to the `acme.json` file to migrate.
- `storage` will contain the key where the certificates will be stored.

#### Store data in a file

ACME certificates can be stored in a JSON file with `600` file permissions.

There are two ways to store ACME certificates in a file from Docker:

- create a file on your host and mount it as a volume:
```toml
storage = "acme.json"
```
```bash
docker run -v "/my/host/acme.json:acme.json" traefik
```
- mount the folder containing the file as a volume
```toml
storage = "/etc/traefik/acme/acme.json"
```
```bash
docker run -v "/my/host/acme:/etc/traefik/acme" traefik
```

!!! warning
This file cannot be shared across multiple instances of Træfik at the same time.
If you have to use Træfik cluster mode, please use [a KV Store entry](/configuration/acme/#storage-kv-entry).

#### Store data in a KV store entry

ACME certificates can be stored in a KV Store entry.

```toml
storage = "traefik/acme/account"
```

**This kind of storage is mandatory in cluster mode.**

Because KV stores (like Consul) have a limited entry size, the certificates list is compressed before being stored in a single KV store entry.

!!! note
It's possible to store up to approximately 100 ACME certificates in Consul.

### `acme.httpChallenge`

Use the `HTTP-01` challenge to generate/renew ACME certificates.

Redirection is fully compatible with the `HTTP-01` challenge.
You can use redirection with the `HTTP-01` challenge without problems.

```toml
[acme]
# ...
@@ -273,7 +314,7 @@ Useful if internal networks block external DNS queries.

### `onDemand` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
This option is deprecated.

```toml
@@ -350,12 +391,12 @@ Each domain & SANs will lead to a certificate request.

### `dnsProvider` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
This option is deprecated.
Please refer to [DNS challenge provider section](/configuration/acme/#provider)

### `delayDontCheckDNS` (Deprecated)

!!! warning
!!! danger "DEPRECATED"
This option is deprecated.
Please refer to [DNS challenge delayBeforeCheck section](/configuration/acme/#delaybeforecheck)
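Putting the storage options above together, a hedged cluster-mode sketch using Consul as the KV store could look like this (the KV key, Consul endpoint, and e-mail are examples, not taken from the documentation above):

```toml
[acme]
email = "admin@example.com"
entryPoint = "https"
# Mandatory in cluster mode: a KV store entry instead of a local file.
storage = "traefik/acme/account"

  [acme.httpChallenge]
  entryPoint = "http"

# The KV store backing the cluster; the certificate list is compressed
# before being written to the entry, as described above.
[consul]
endpoint = "127.0.0.1:8500"
watch = true
prefix = "traefik"
```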
@@ -1,6 +1,140 @@
|
||||
# File Backends
|
||||
|
||||
Like any other reverse proxy, Træfik can be configured with a file.
|
||||
Træfik can be configured with a file.
|
||||
|
||||
## Reference
|
||||
|
||||
```toml
|
||||
# Backends
|
||||
[backends]
|
||||
|
||||
[backends.backend1]
|
||||
|
||||
[backends.backend1.servers]
|
||||
[backends.backend1.servers.server0]
|
||||
url = "http://10.10.10.1:80"
|
||||
weight = 1
|
||||
[backends.backend1.servers.server1]
|
||||
url = "http://10.10.10.2:80"
|
||||
weight = 2
|
||||
# ...
|
||||
|
||||
[backends.backend1.circuitBreaker]
|
||||
expression = "NetworkErrorRatio() > 0.5"
|
||||
|
||||
[backends.backend1.loadBalancer]
|
||||
method = "drr"
|
||||
[backends.backend1.loadBalancer.stickiness]
|
||||
cookieName = "foobar"
|
||||
|
||||
[backends.backend1.maxConn]
|
||||
amount = 10
|
||||
extractorfunc = "request.host"
|
||||
|
||||
[backends.backend1.healthCheck]
|
||||
path = "/health"
|
||||
port = 88
|
||||
interval = "30s"
|
||||
|
||||
[backends.backend2]
|
||||
# ...
|
||||
|
||||
# Frontends
|
||||
[frontends]
|
||||
|
||||
[frontends.frontend1]
|
||||
entryPoints = ["http", "https"]
|
||||
backend = "backend1"
|
||||
passHostHeader = true
|
||||
passTLSCert = true
|
||||
priority = 42
|
||||
basicAuth = [
|
||||
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
]
|
||||
whitelistSourceRange = ["10.42.0.0/16", "152.89.1.33/32", "afed:be44::/16"]
|
||||
|
||||
[frontends.frontend1.routes]
|
||||
[frontends.frontend1.routes.route0]
|
||||
rule = "Host:test.localhost"
|
||||
[frontends.frontend1.routes.Route1]
|
||||
rule = "Method:GET"
|
||||
# ...
|
||||
|
||||
[frontends.frontend1.headers]
|
||||
allowedHosts = ["foobar", "foobar"]
|
||||
hostsProxyHeaders = ["foobar", "foobar"]
|
||||
SSLRedirect = true
|
||||
SSLTemporaryRedirect = true
|
||||
SSLHost = "foobar"
|
||||
STSSeconds = 42
|
||||
STSIncludeSubdomains = true
|
||||
STSPreload = true
|
||||
forceSTSHeader = true
|
||||
frameDeny = true
|
||||
customFrameOptionsValue = "foobar"
|
||||
contentTypeNosniff = true
|
||||
browserXSSFilter = true
|
||||
contentSecurityPolicy = "foobar"
|
||||
publicKey = "foobar"
|
||||
referrerPolicy = "foobar"
|
||||
isDevelopment = true
|
||||
[frontends.frontend1.headers.customRequestHeaders]
|
||||
X-Foo-Bar-01 = "foobar"
|
||||
X-Foo-Bar-02 = "foobar"
|
||||
# ...
|
||||
[frontends.frontend1.headers.customResponseHeaders]
|
||||
X-Foo-Bar-03 = "foobar"
|
||||
X-Foo-Bar-04 = "foobar"
|
||||
# ...
|
||||
[frontends.frontend1.headers.SSLProxyHeaders]
|
||||
X-Foo-Bar-05 = "foobar"
|
||||
X-Foo-Bar-06 = "foobar"
|
||||
# ...
|
||||
|
||||
[frontends.frontend1.errors]
|
||||
[frontends.frontend1.errors.errorPage0]
|
||||
status = ["500-599"]
|
||||
backend = "error"
|
||||
query = "/{status}.html"
|
||||
[frontends.frontend1.errors.errorPage1]
|
||||
status = ["404", "403"]
|
||||
backend = "error"
|
||||
query = "/{status}.html"
|
||||
# ...
|
||||
|
||||
[frontends.frontend1.ratelimit]
|
||||
extractorfunc = "client.ip"
|
||||
[frontends.frontend1.ratelimit.rateset.rateset1]
|
||||
period = "10s"
|
||||
average = 100
|
||||
burst = 200
|
||||
[frontends.frontend1.ratelimit.rateset.rateset2]
|
||||
period = "3s"
|
||||
average = 5
|
||||
burst = 10
|
||||
# ...
|
||||
|
||||
[frontends.frontend1.redirect]
|
||||
entryPoint = "https"
|
||||
regex = "^http://localhost/(.*)"
|
||||
replacement = "http://mydomain/$1"
|
||||
|
||||
[frontends.frontend2]
|
||||
# ...
|
||||
|
||||
# HTTPS certificates
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tls.certificate]
|
||||
certFile = "path/to/my.cert"
|
||||
keyFile = "path/to/my.key"
|
||||
|
||||
[[tls]]
|
||||
# ...
|
||||
```
|
||||
|
||||
## Configuration mode
|
||||
|
||||
You have three choices:
|
||||
|
||||
@@ -12,7 +146,7 @@ To enable the file backend, you must either pass the `--file` option to the Træ
|
||||
|
||||
The configuration file allows managing both backends/frontends and HTTPS certificates (which are not [Let's Encrypt](https://letsencrypt.org) certificates generated through Træfik).
|
||||
|
||||
## Simple
|
||||
### Simple
|
||||
|
||||
Add your configuration at the end of the global configuration file `traefik.toml`:
|
||||
|
||||
@@ -21,167 +155,93 @@ defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
[entryPoints.http.redirect]
|
||||
entryPoint = "https"
|
||||
# ...
|
||||
[entryPoints.https]
|
||||
address = ":443"
|
||||
[entryPoints.https.tls]
|
||||
[[entryPoints.https.tls.certificates]]
|
||||
certFile = "integration/fixtures/https/snitest.org.cert"
|
||||
keyFile = "integration/fixtures/https/snitest.org.key"
|
||||
# ...
|
||||
|
||||
[file]
|
||||
|
||||
# rules
|
||||
[backends]
|
||||
[backends.backend1]
|
||||
[backends.backend1.circuitbreaker]
|
||||
expression = "NetworkErrorRatio() > 0.5"
|
||||
[backends.backend1.servers.server1]
|
||||
url = "http://172.17.0.2:80"
|
||||
weight = 10
|
||||
[backends.backend1.servers.server2]
|
||||
url = "http://172.17.0.3:80"
|
||||
weight = 1
|
||||
# ...
|
||||
[backends.backend2]
|
||||
[backends.backend2.maxconn]
|
||||
amount = 10
|
||||
extractorfunc = "request.host"
|
||||
[backends.backend2.LoadBalancer]
|
||||
method = "drr"
|
||||
[backends.backend2.servers.server1]
|
||||
url = "http://172.17.0.4:80"
|
||||
weight = 1
|
||||
[backends.backend2.servers.server2]
|
||||
url = "http://172.17.0.5:80"
|
||||
weight = 2
|
||||
# ...
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend1]
|
||||
backend = "backend2"
|
||||
[frontends.frontend1.routes.test_1]
|
||||
rule = "Host:test.localhost"
|
||||
|
||||
# ...
|
||||
[frontends.frontend2]
|
||||
backend = "backend1"
|
||||
passHostHeader = true
|
||||
priority = 10
|
||||
|
||||
# restrict access to this frontend to the specified list of IPv4/IPv6 CIDR Nets
|
||||
# an unset or empty list allows all Source-IPs to access
|
||||
# if one of the Net-Specifications are invalid, the whole list is invalid
|
||||
# and allows all Source-IPs to access.
|
||||
whitelistSourceRange = ["10.42.0.0/16", "152.89.1.33/32", "afed:be44::/16"]
|
||||
|
||||
entrypoints = ["https"] # overrides defaultEntryPoints
|
||||
[frontends.frontend2.routes.test_1]
|
||||
rule = "Host:{subdomain:[a-z]+}.localhost"
|
||||
|
||||
# ...
|
||||
[frontends.frontend3]
|
||||
entrypoints = ["http", "https"] # overrides defaultEntryPoints
|
||||
backend = "backend2"
|
||||
rule = "Path:/test"
|
||||
# ...
|
||||
|
||||
# HTTPS certificate
|
||||
[[tlsConfiguration]]
|
||||
entryPoints = ["https"]
|
||||
[tlsConfiguration.certificate]
|
||||
certFile = "integration/fixtures/https/snitest.com.cert"
|
||||
keyFile = "integration/fixtures/https/snitest.com.key"
|
||||
[[tls]]
|
||||
# ...
|
||||
|
||||
[[tls]]
|
||||
# ...
|
||||
```
|
||||
|
||||
!!! note
|
||||
adding certificates directly to the entrypoint is still maintained but certificates declared in this way cannot be managed dynamically.
|
||||
It's recommended to use the file provider to declare certificates.
|
||||
|
||||
## Rules in a Separate File
|
||||
### Rules in a Separate File
|
||||
|
||||
Put your rules in a separate file, for example `rules.toml`:
|
||||
|
||||
```toml
|
||||
# traefik.toml
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
[entryPoints.http.redirect]
|
||||
entryPoint = "https"
|
||||
# ...
|
||||
[entryPoints.https]
|
||||
address = ":443"
|
||||
[entryPoints.https.tls]
|
||||
# ...
|
||||
|
||||
[file]
|
||||
filename = "rules.toml"
|
||||
filename = "rules.toml"
|
||||
```
|
||||
|
||||
```toml
|
||||
# rules.toml
|
||||
[backends]
|
||||
[backends.backend1]
|
||||
[backends.backend1.circuitbreaker]
|
||||
expression = "NetworkErrorRatio() > 0.5"
|
||||
[backends.backend1.servers.server1]
|
||||
url = "http://172.17.0.2:80"
|
||||
weight = 10
|
||||
[backends.backend1.servers.server2]
|
||||
url = "http://172.17.0.3:80"
|
||||
weight = 1
|
||||
# ...
|
||||
[backends.backend2]
|
||||
[backends.backend2.maxconn]
|
||||
amount = 10
|
||||
extractorfunc = "request.host"
|
||||
[backends.backend2.LoadBalancer]
|
||||
method = "drr"
|
||||
[backends.backend2.servers.server1]
|
||||
url = "http://172.17.0.4:80"
|
||||
weight = 1
|
||||
[backends.backend2.servers.server2]
|
||||
url = "http://172.17.0.5:80"
|
||||
weight = 2
|
||||
# ...
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend1]
|
||||
backend = "backend2"
|
||||
[frontends.frontend1.routes.test_1]
|
||||
rule = "Host:test.localhost"
|
||||
# ...
|
||||
[frontends.frontend2]
|
||||
backend = "backend1"
|
||||
passHostHeader = true
|
||||
priority = 10
|
||||
entrypoints = ["https"] # overrides defaultEntryPoints
|
||||
[frontends.frontend2.routes.test_1]
|
||||
rule = "Host:{subdomain:[a-z]+}.localhost"
|
||||
# ...
|
||||
[frontends.frontend3]
|
||||
entrypoints = ["http", "https"] # overrides defaultEntryPoints
|
||||
backend = "backend2"
|
||||
rule = "Path:/test"
|
||||
|
||||
# HTTPS certificate
|
||||
[[tlsConfiguration]]
|
||||
entryPoints = ["https"]
|
||||
[tlsConfiguration.certificate]
|
||||
certFile = "integration/fixtures/https/snitest.com.cert"
|
||||
keyFile = "integration/fixtures/https/snitest.com.key"
|
||||
# ...
|
||||
|
||||
[[tlsConfiguration]]
|
||||
entryPoints = ["https"]
|
||||
[[tlsConfiguration.certificates]]
|
||||
certFile = "integration/fixtures/https/snitest.org.cert"
|
||||
keyFile = "integration/fixtures/https/snitest.org.key"
|
||||
# HTTPS certificate
|
||||
[[tls]]
|
||||
# ...
|
||||
|
||||
[[tls]]
|
||||
# ...
|
||||
```
|
||||
|
||||
## Multiple `.toml` Files
|
||||
### Multiple `.toml` Files
|
||||
|
||||
You could have multiple `.toml` files in a directory (and recursively in its sub-directories):
|
||||
|
||||
```toml
|
||||
[file]
|
||||
directory = "/path/to/config/"
|
||||
directory = "/path/to/config/"
|
||||
```
|
||||
|
||||
If you want Træfik to watch file changes automatically, just add:
|
||||
|
||||
```toml
|
||||
[file]
|
||||
watch = true
|
||||
watch = true
|
||||
```
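If helpful, here is a small sketch (the path is a placeholder) combining both options in a single `[file]` section, so every `.toml` file under the directory, including its sub-directories, is loaded and reloaded on change:

```toml
[file]
  directory = "/path/to/config/"
  watch = true
```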
|
||||
|
||||
@@ -150,7 +150,7 @@ The following security annotations are applicable on the Ingress object:
|
||||
| `ingress.kubernetes.io/ssl-host:HOST` | This setting configures the hostname that redirects will be based on. Default is "", which is the same host as the request. |
|
||||
| `ingress.kubernetes.io/ssl-proxy-headers:EXPR` | Header combinations that would signify a proper SSL Request (Such as `X-Forwarded-For:https`). Format: <code>HEADER:value||HEADER2:value2</code> |
|
||||
| `ingress.kubernetes.io/hsts-max-age:315360000` | Sets the max-age of the HSTS header. |
|
||||
| `ngress.kubernetes.io/hsts-include-subdomains:true` | Adds the IncludeSubdomains section of the STS header. |
|
||||
| `ingress.kubernetes.io/hsts-include-subdomains:true` | Adds the IncludeSubdomains section of the STS header. |
|
||||
| `ingress.kubernetes.io/hsts-preload:true` | Adds the preload flag to the HSTS header. |
|
||||
| `ingress.kubernetes.io/force-hsts:false` | Adds the STS header to non-SSL requests. |
|
||||
| `ingress.kubernetes.io/frame-deny:false` | Adds the `X-Frame-Options` header with the value of `DENY`. |
|
||||
|
||||
@@ -285,21 +285,17 @@ Multiple sets of rates can be added to each frontend, but the time periods must
|
||||
```toml
|
||||
[frontends]
|
||||
[frontends.frontend1]
|
||||
passHostHeader = true
|
||||
entrypoints = ["http"]
|
||||
backend = "backend1"
|
||||
[frontends.frontend1.routes.test_1]
|
||||
rule = "Path:/"
|
||||
[frontends.frontend1.ratelimit]
|
||||
extractorfunc = "client.ip"
|
||||
[frontends.frontend1.ratelimit.rateset.rateset1]
|
||||
period = "10s"
|
||||
average = 100
|
||||
burst = 200
|
||||
[frontends.frontend1.ratelimit.rateset.rateset2]
|
||||
period = "3s"
|
||||
average = 5
|
||||
burst = 10
|
||||
# ...
|
||||
[frontends.frontend1.ratelimit]
|
||||
extractorfunc = "client.ip"
|
||||
[frontends.frontend1.ratelimit.rateset.rateset1]
|
||||
period = "10s"
|
||||
average = 100
|
||||
burst = 200
|
||||
[frontends.frontend1.ratelimit.rateset.rateset2]
|
||||
period = "3s"
|
||||
average = 5
|
||||
burst = 10
|
||||
```
|
||||
|
||||
In the above example, `frontend1` is configured to limit requests by the client's IP address.
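As a rough reading of the numbers: `rateset1` allows an average of 100 requests per 10 seconds (about 10 req/s) with bursts of up to 200, while `rateset2` adds a tighter cap of 5 requests per 3 seconds with bursts of up to 10; a client IP has to stay within both sets.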
|
||||
|
||||
@@ -1,5 +1,120 @@
|
||||
# Entry Points Definition
|
||||
|
||||
## Reference
|
||||
|
||||
### TOML
|
||||
|
||||
```toml
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
whitelistSourceRange = ["10.42.0.0/16", "152.89.1.33/32", "afed:be44::/16"]
|
||||
compress = true
|
||||
|
||||
[entryPoints.http.tls]
|
||||
minVersion = "VersionTLS12"
|
||||
cipherSuites = ["TLS_RSA_WITH_AES_256_GCM_SHA384"]
|
||||
[[entryPoints.http.tls.certificates]]
|
||||
certFile = "path/to/my.cert"
|
||||
keyFile = "path/to/my.key"
|
||||
[[entryPoints.http.tls.certificates]]
|
||||
certFile = "path/to/other.cert"
|
||||
keyFile = "path/to/other.key"
|
||||
# ...
|
||||
[entryPoints.http.tls.clientCA]
|
||||
files = ["path/to/ca1.crt", "path/to/ca2.crt"]
|
||||
optional = false
|
||||
|
||||
[entryPoints.http.redirect]
|
||||
entryPoint = "https"
|
||||
regex = "^http://localhost/(.*)"
|
||||
replacement = "http://mydomain/$1"
|
||||
|
||||
[entryPoints.http.auth]
|
||||
headerField = "X-WebAuth-User"
|
||||
[entryPoints.http.auth.basic]
|
||||
users = [
|
||||
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
]
|
||||
usersFile = "/path/to/.htpasswd"
|
||||
[entryPoints.http.auth.digest]
|
||||
users = [
|
||||
"test:traefik:a2688e031edb4be6a3797f3882655c05",
|
||||
"test2:traefik:518845800f9e2bfb1f1f740ec24f074e",
|
||||
]
|
||||
usersFile = "/path/to/.htdigest"
|
||||
[entryPoints.http.auth.forward]
|
||||
address = "https://authserver.com/auth"
|
||||
trustForwardHeader = true
|
||||
[entryPoints.http.auth.forward.tls]
|
||||
ca = [ "path/to/local.crt"]
|
||||
caOptional = true
|
||||
cert = "path/to/foo.cert"
|
||||
key = "path/to/foo.key"
|
||||
insecureSkipVerify = true
|
||||
|
||||
[entryPoints.http.proxyProtocol]
|
||||
insecure = true
|
||||
trustedIPs = ["10.10.10.1", "10.10.10.2"]
|
||||
|
||||
[entryPoints.http.forwardedHeaders]
|
||||
trustedIPs = ["10.10.10.1", "10.10.10.2"]
|
||||
|
||||
[entryPoints.https]
|
||||
# ...
|
||||
```
|
||||
|
||||
### CLI
|
||||
|
||||
For more information about the CLI, see the documentation about [Traefik command](/basics/#traefik).
|
||||
|
||||
```shell
|
||||
--entryPoints='Name:http Address::80'
|
||||
--entryPoints='Name:https Address::443 TLS'
|
||||
```
|
||||
|
||||
!!! note
|
||||
Whitespace is used as option separator and `,` is used as value separator for the list.
|
||||
The names of the options are case-insensitive.
|
||||
|
||||
In compose file the entrypoint syntax is different:
|
||||
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik
|
||||
command:
|
||||
- --defaultentrypoints=powpow
|
||||
- "--entryPoints=Name:powpow Address::42 Compress:true"
|
||||
```
|
||||
or
|
||||
```yaml
|
||||
traefik:
|
||||
image: traefik
|
||||
command: --defaultentrypoints=powpow --entryPoints='Name:powpow Address::42 Compress:true'
|
||||
```
|
||||
|
||||
#### All available options:
|
||||
|
||||
```ini
|
||||
Name:foo
|
||||
Address::80
|
||||
TLS:goo,gii
|
||||
TLS
|
||||
CA:car
|
||||
CA.Optional:true
|
||||
Redirect.EntryPoint:https
|
||||
Redirect.Regex:http://localhost/(.*)
|
||||
Redirect.Replacement:http://mydomain/$1
|
||||
Compress:true
|
||||
WhiteListSourceRange:10.42.0.0/16,152.89.1.33/32,afed:be44::/16
|
||||
ProxyProtocol.TrustedIPs:192.168.0.1
|
||||
ProxyProtocol.Insecure:true
|
||||
ForwardedHeaders.TrustedIPs:10.0.0.3/24,20.0.0.3/24
|
||||
```
|
||||
|
||||
## Basic
|
||||
|
||||
```toml
|
||||
# Entrypoints definition
|
||||
#
|
||||
@@ -51,10 +166,16 @@ To redirect an entrypoint rewriting the URL.
|
||||
```
|
||||
|
||||
!!! note
|
||||
Please note that `regex` and `replacement` do not have to be set in the `redirect` structure if an entrypoint is defined for the redirection (they will not be used in this case).
|
||||
Please note that `regex` and `replacement` do not have to be set in the `redirect` structure if an `entrypoint` is defined for the redirection (they will not be used in this case).
|
||||
|
||||
Care should be taken when defining replacement expand variables: `$1x` is equivalent to `${1x}`, not `${1}x` (see [Regexp.Expand](https://golang.org/pkg/regexp/#Regexp.Expand)), so use `${1}` syntax.
|
||||
|
||||
Regular expressions and replacements can be tested using online tools such as [Go Playground](https://play.golang.org/p/mWU9p-wk2ru) or the [Regex101](https://regex101.com/r/58sIgx/2).
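As a sketch, the redirect from the reference above (the host names are just the documentation's placeholders) written with the unambiguous `${1}` form looks like this:

```toml
[entryPoints.http.redirect]
  regex = "^http://localhost/(.*)"
  replacement = "http://mydomain/${1}"
```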
|
||||
|
||||
## TLS
|
||||
|
||||
### Static Certificates
|
||||
|
||||
Define an entrypoint with SNI support.
|
||||
|
||||
```toml
|
||||
@@ -70,6 +191,12 @@ Define an entrypoint with SNI support.
|
||||
!!! note
|
||||
If an empty TLS configuration is done, default self-signed certificates are generated.
|
||||
|
||||
|
||||
### Dynamic Certificates
|
||||
|
||||
If you need to add or remove TLS certificates while Traefik is running, dynamic TLS certificates are supported using the [file provider](/configuration/backends/file).
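A minimal sketch of that setup (paths are placeholders): the certificates live in a watched file-provider configuration, so they can be added or removed without restarting Traefik.

```toml
[file]
  watch = true

[[tls]]
  entryPoints = ["https"]
  [tls.certificate]
    certFile = "path/to/domain.cert"
    keyFile = "path/to/domain.key"
```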
|
||||
|
||||
|
||||
## TLS Mutual Authentication
|
||||
|
||||
TLS Mutual Authentication can be `optional` or not.
|
||||
@@ -100,9 +227,8 @@ In the example below both `snitest.com` and `snitest.org` will require client ce
|
||||
```
|
||||
|
||||
!!! note
|
||||
|
||||
The deprecated argument `ClientCAFiles` allows adding Client CA files which are mandatory.
|
||||
If this parameter exists, the new ones are not checked.
|
||||
The deprecated argument `ClientCAFiles` allows adding Client CA files which are mandatory.
|
||||
If this parameter exists, the new ones are not checked.
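For comparison, a sketch of the per-entrypoint client CA settings that replace it (the CA paths are placeholders); with `optional = false`, clients must present a certificate signed by one of the listed CAs:

```toml
[entryPoints.https.tls.clientCA]
  files = ["path/to/ca1.crt", "path/to/ca2.crt"]
  optional = false
```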
|
||||
|
||||
## Authentication
|
||||
|
||||
@@ -154,7 +280,7 @@ Otherwise, the response from the auth server is returned.
|
||||
# To enable forward auth on an entrypoint
|
||||
[entryPoints.http.auth.forward]
|
||||
address = "https://authserver.com/auth"
|
||||
|
||||
|
||||
# Trust existing X-Forwarded-* headers.
|
||||
# Useful with another reverse proxy in front of Traefik.
|
||||
#
|
||||
@@ -162,7 +288,7 @@ Otherwise, the response from the auth server is returned.
|
||||
# Default: false
|
||||
#
|
||||
trustForwardHeader = true
|
||||
|
||||
|
||||
# Enable forward auth TLS connection.
|
||||
#
|
||||
# Optional
|
||||
@@ -226,7 +352,7 @@ Only IPs in `trustedIPs` will lead to remote client address replacement: you sho
|
||||
|
||||
!!! danger
|
||||
When queuing Træfik behind another load-balancer, be sure to carefully configure Proxy Protocol on both sides.
|
||||
Otherwise, it could introduce a security risk in your system by forging requests.
|
||||
Otherwise, it could introduce a security risk in your system by forging requests.
|
||||
|
||||
```toml
|
||||
[entryPoints]
|
||||
|
||||
@@ -35,14 +35,14 @@ TL;DR:
|
||||
|
||||
```shell
|
||||
$ traefik \
|
||||
--entrypoints=Name:http Address::80 Redirect.EntryPoint:https \
|
||||
--entrypoints=Name:https Address::443 TLS \
|
||||
--entrypoints='Name:http Address::80 Redirect.EntryPoint:https' \
|
||||
--entrypoints='Name:https Address::443 TLS' \
|
||||
--defaultentrypoints=http,https
|
||||
```
|
||||
|
||||
To listen to different ports, we need to create an entry point for each.
|
||||
|
||||
The CLI syntax is `--entrypoints=Name:a_name Address:an_ip_or_empty:a_port options`.
|
||||
The CLI syntax is `--entrypoints='Name:a_name Address:an_ip_or_empty:a_port options'`.
|
||||
If you want to redirect traffic from one entry point to another, it's the option `Redirect.EntryPoint:entrypoint_name`.
|
||||
|
||||
Rather than configuring every service to listen on both http and https individually, we add a default entry point configuration: `--defaultentrypoints=http,https`.
|
||||
|
||||
@@ -23,3 +23,11 @@ A Træfik cluster is based on a manager/worker model.
|
||||
|
||||
When starting, Træfik will elect a manager.
|
||||
If this instance fails, another manager will be automatically elected.
|
||||
|
||||
## Træfik cluster and Let's Encrypt
|
||||
|
||||
**In cluster mode, ACME certificates have to be stored in [a KV Store entry](/configuration/acme/#storage-kv-entry).**
|
||||
|
||||
Thanks to the Træfik cluster mode algorithm (based on [the Raft Consensus Algorithm](https://raft.github.io/)), only one instance will contact Let's encrypt to solve the challenges.
|
||||
|
||||
The other instances will get the ACME certificates from the KV Store entry.
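A sketch of the relevant pieces for a Consul-backed cluster (the endpoint and prefix are placeholders, not taken from this page): the ACME data is stored under a KV entry rather than in a local `acme.json` file.

```toml
[acme]
  email = "you@example.com"
  storage = "traefik/acme/account"
  entryPoint = "https"

[consul]
  endpoint = "127.0.0.1:8500"
  watch = true
  prefix = "traefik"
```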
|
||||
@@ -110,7 +110,7 @@ entryPoint = "http"
|
||||
|
||||
This is the minimum configuration required to do the following:
|
||||
|
||||
- Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messagse
|
||||
- Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messages
|
||||
- Check for new versions of Træfik periodically
|
||||
- Create two entry points, namely an `HTTP` endpoint on port `80`, and an `HTTPS` endpoint on port `443` where all incoming traffic on port `80` will immediately get redirected to `HTTPS`.
|
||||
- Enable the Docker configuration backend and listen for container events on the Docker unix socket we've mounted earlier. However, **new containers will not be exposed by Træfik by default, we'll get into this in a bit!**
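As a rough sketch (values are illustrative; only the Docker socket path is the usual default), a `traefik.toml` matching that list could look like:

```toml
logLevel = "ERROR"
checkNewVersion = true

defaultEntryPoints = ["http", "https"]

[entryPoints]
  [entryPoints.http]
  address = ":80"
    [entryPoints.http.redirect]
    entryPoint = "https"
  [entryPoints.https]
  address = ":443"
    [entryPoints.https.tls]

[docker]
endpoint = "unix:///var/run/docker.sock"
watch = true
exposedByDefault = false
```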
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Key-value store configuration
|
||||
|
||||
Both [static global configuration](/user-guide/kv-config/#static-configuration-in-key-value-store) and [dynamic](/user-guide/kv-config/#dynamic-configuration-in-key-value-store) configuration can be sorted in a Key-value store.
|
||||
Both [static global configuration](/user-guide/kv-config/#static-configuration-in-key-value-store) and [dynamic](/user-guide/kv-config/#dynamic-configuration-in-key-value-store) configuration can be stored in a Key-value store.
|
||||
|
||||
This section explains how to launch Træfik using a configuration loaded from a Key-value store.
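For example (sketch only; the Consul endpoint is a placeholder), pointing Træfik at a Consul KV store from the command line:

```shell
traefik --consul --consul.endpoint=127.0.0.1:8500 --consul.prefix=traefik
```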
|
||||
|
||||
@@ -274,14 +274,14 @@ Here is the toml configuration we would like to store in the store :
|
||||
backend = "backend2"
|
||||
rule = "Path:/test"
|
||||
|
||||
[[tlsConfiguration]]
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tlsConfiguration.certificate]
|
||||
[tls.certificate]
|
||||
certFile = "path/to/your.cert"
|
||||
keyFile = "path/to/your.key"
|
||||
[[tlsConfiguration]]
|
||||
[[tls]]
|
||||
entryPoints = ["https","other-https"]
|
||||
[tlsConfiguration.certificate]
|
||||
[tls.certificate]
|
||||
certFile = """-----BEGIN CERTIFICATE-----
|
||||
<cert file content>
|
||||
-----END CERTIFICATE-----"""
|
||||
@@ -335,19 +335,19 @@ And there, the same dynamic configuration in a KV Store (using `prefix = "traefi
|
||||
|
||||
- certificate 1
|
||||
|
||||
| Key | Value |
|
||||
|----------------------------------------------------|--------------------|
|
||||
| `/traefik/tlsconfiguration/1/entrypoints` | `https` |
|
||||
| `/traefik/tlsconfiguration/1/certificate/certfile` | `path/to/your.cert`|
|
||||
| `/traefik/tlsconfiguration/1/certificate/keyfile` | `path/to/your.key` |
|
||||
| Key | Value |
|
||||
|---------------------------------------|--------------------|
|
||||
| `/traefik/tls/1/entrypoints` | `https` |
|
||||
| `/traefik/tls/1/certificate/certfile` | `path/to/your.cert`|
|
||||
| `/traefik/tls/1/certificate/keyfile` | `path/to/your.key` |
|
||||
|
||||
- certificate 2
|
||||
|
||||
| Key | Value |
|
||||
|----------------------------------------------------|-----------------------|
|
||||
| `/traefik/tlsconfiguration/2/entrypoints` | `https,other-https` |
|
||||
| `/traefik/tlsconfiguration/2/certificate/certfile` | `<cert file content>` |
|
||||
| `/traefik/tlsconfiguration/2/certificate/keyfile`  | `<key file content>`  |
|
||||
| Key | Value |
|
||||
|---------------------------------------|-----------------------|
|
||||
| `/traefik/tls/2/entrypoints` | `https,other-https` |
|
||||
| `/traefik/tls/2/certificate/certfile` | `<cert file content>` |
|
||||
| `/traefik/tls/2/certificate/keyfile`  | `<key file content>`  |
|
||||
|
||||
### Atomic configuration changes
|
||||
|
||||
@@ -408,7 +408,7 @@ Here, we have a 50% balance between the `http://172.17.0.3:80` and the `http://1
|
||||
## Store configuration in Key-value store
|
||||
|
||||
!!! note
|
||||
Don't forget to [setup the connection between Træfik and Key-value store](/user-guide/kv-config/#launch-trfk).
|
||||
Don't forget to [setup the connection between Træfik and Key-value store](/user-guide/kv-config/#launch-trfik).
|
||||
|
||||
The static Træfik configuration in a key-value store can be automatically created and updated, using the [`storeconfig` subcommand](/basics/#commands).
|
||||
|
||||
@@ -416,7 +416,7 @@ The static Træfik configuration in a key-value store can be automatically creat
|
||||
traefik storeconfig [flags] ...
|
||||
```
|
||||
This command only automates the [process which uploads the configuration into the Key-value store](/user-guide/kv-config/#upload-the-configuration-in-the-key-value-store).
|
||||
Træfik will not start but the [static configuration](/basics/#static-trfk-configuration) will be uploaded into the Key-value store.
|
||||
Træfik will not start but the [static configuration](/basics/#static-trfik-configuration) will be uploaded into the Key-value store.
|
||||
|
||||
If you configured ACME (Let's Encrypt), your registration account and your certificates will also be uploaded.
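A sketch of a typical invocation (the endpoint is a placeholder): read the local static configuration, push it to Consul, then exit.

```shell
traefik storeconfig -c traefik.toml --consul --consul.endpoint=127.0.0.1:8500
```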
|
||||
|
||||
|
||||
@@ -29,6 +29,8 @@ services :
|
||||
- bhsm
|
||||
- bmysql
|
||||
- brabbitmq
|
||||
volumes:
|
||||
- "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro"
|
||||
|
||||
bhsm:
|
||||
image: letsencrypt/boulder-tools:2016-11-02
|
||||
|
||||
42
examples/acme/rate-limit-policies.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
totalCertificates:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerName:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
ratelimit.me: 1
|
||||
lim.it: 0
|
||||
# Hostnames used by the letsencrypt client integration test.
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
nginx.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
bad-caa-reserved.com: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
registrationOverrides:
|
||||
101: 1000
|
||||
registrationsPerIP:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
127.0.0.1: 1000000
|
||||
pendingAuthorizationsPerAccount:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerFQDNSet:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
le.wtf,le1.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
nginx.wtf: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
@@ -73,6 +73,8 @@ services:
|
||||
- bhsm
|
||||
- bmysql
|
||||
- brabbitmq
|
||||
volumes:
|
||||
- "./rate-limit-policies.yml:/go/src/github.com/letsencrypt/boulder/test/rate-limit-policies.yml:ro"
|
||||
networks:
|
||||
net:
|
||||
ipv4_address: 10.0.1.3
|
||||
|
||||
42
examples/cluster/rate-limit-policies.yml
Normal file
@@ -0,0 +1,42 @@
|
||||
totalCertificates:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerName:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
ratelimit.me: 1
|
||||
lim.it: 0
|
||||
# Hostnames used by the letsencrypt client integration test.
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
nginx.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
bad-caa-reserved.com: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
registrationOverrides:
|
||||
101: 1000
|
||||
registrationsPerIP:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
127.0.0.1: 1000000
|
||||
pendingAuthorizationsPerAccount:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
certificatesPerFQDNSet:
|
||||
window: 1h
|
||||
threshold: 100000
|
||||
overrides:
|
||||
le.wtf: 10000
|
||||
le1.wtf: 10000
|
||||
le2.wtf: 10000
|
||||
le3.wtf: 10000
|
||||
le.wtf,le1.wtf: 10000
|
||||
good-caa-reserved.com: 10000
|
||||
nginx.wtf: 10000
|
||||
ecdsa.le.wtf: 10000
|
||||
must-staple.le.wtf: 10000
|
||||
@@ -19,8 +19,7 @@ caServer = "http://traefik.boulder.com:4000/directory"
|
||||
entryPoint="http"
|
||||
|
||||
|
||||
[web]
|
||||
address = ":8080"
|
||||
[api]
|
||||
|
||||
[docker]
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
|
||||
@@ -26,11 +26,11 @@ curl -i -H "Accept: application/json" -X PUT -d "Path:/test" ht
|
||||
|
||||
|
||||
# certificate 1
|
||||
curl -i -H "Accept: application/json" -X PUT -d "https" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair1/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test1.crt" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair1/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test1.key" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair1/certificate/keyfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "https" http://localhost:8500/v1/kv/traefik/tls/pair1/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test1.crt" http://localhost:8500/v1/kv/traefik/tls/pair1/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test1.key" http://localhost:8500/v1/kv/traefik/tls/pair1/certificate/keyfile
|
||||
|
||||
# certificate 2
|
||||
curl -i -H "Accept: application/json" -X PUT -d "http,https" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair2/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test2.crt" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair2/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test2.key" http://localhost:8500/v1/kv/traefik/tlsconfiguration/pair2/certificate/keyfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "http,https" http://localhost:8500/v1/kv/traefik/tls/pair2/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test2.crt" http://localhost:8500/v1/kv/traefik/tls/pair2/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d "/tmp/test2.key" http://localhost:8500/v1/kv/traefik/tls/pair2/certificate/keyfile
|
||||
|
||||
@@ -28,14 +28,14 @@ function insert_etcd2_data() {
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="Path:/test" http://localhost:2379/v2/keys/traefik/frontends/frontend2/routes/test_2/rule
|
||||
|
||||
# certificate 1
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="https" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair1/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test1.crt" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair1/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test1.key" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair1/certificate/keyfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="https" http://localhost:2379/v2/keys/traefik/tls/pair1/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test1.crt" http://localhost:2379/v2/keys/traefik/tls/pair1/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test1.key" http://localhost:2379/v2/keys/traefik/tls/pair1/certificate/keyfile
|
||||
|
||||
# certificate 2
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="http,https" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair2/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test2.crt" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair2/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test2.key" http://localhost:2379/v2/keys/traefik/tlsconfiguration/pair2/certificate/keyfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="http,https" http://localhost:2379/v2/keys/traefik/tls/pair2/entrypoints
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test2.crt" http://localhost:2379/v2/keys/traefik/tls/pair2/certificate/certfile
|
||||
curl -i -H "Accept: application/json" -X PUT -d value="/tmp/test2.key" http://localhost:2379/v2/keys/traefik/tls/pair2/certificate/keyfile
|
||||
}
|
||||
|
||||
#
|
||||
@@ -71,14 +71,14 @@ function insert_etcd3_data() {
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/frontends/frontend2/routes/test_2/rule" "Path:/test"
|
||||
|
||||
# certificate 1
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair1/entrypoints" "https"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair1/certificate/certfile" "/tmp/test1.crt"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair1/certificate/keyfile" "/tmp/test1.key"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair1/entrypoints" "https"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair1/certificate/certfile" "/tmp/test1.crt"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair1/certificate/keyfile" "/tmp/test1.key"
|
||||
|
||||
# certificate 2
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair2/entrypoints" "https"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair2/certificate/certfile" "/tmp/test2.crt"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tlsconfiguration/pair2/certificate/keyfile" "/tmp/test2.key"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair2/entrypoints" "https"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair2/certificate/certfile" "/tmp/test2.crt"
|
||||
docker container run --rm -ti -e ETCDCTL_DIAL_="TIMEOUT 10s" -e ETCDCTL_API="3" tenstartups/etcdctl --endpoints=[$etcd_ip:2379] put "/traefik/tls/pair2/certificate/keyfile" "/tmp/test2.key"
|
||||
}
|
||||
|
||||
function show_usage() {
|
||||
|
||||
@@ -92,6 +92,16 @@ func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificateHTTP01(c *check.C) {
|
||||
s.retrieveAcmeCertificate(c, testCase)
|
||||
}
|
||||
|
||||
// Test OnHostRule option with none provided certificate and challenge HTTP-01 and web path
|
||||
func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificateHTTP01WithPath(c *check.C) {
|
||||
testCase := AcmeTestCase{
|
||||
traefikConfFilePath: "fixtures/acme/acme_http01_web.toml",
|
||||
onDemand: false,
|
||||
domainToCheck: acmeDomain}
|
||||
|
||||
s.retrieveAcmeCertificate(c, testCase)
|
||||
}
|
||||
|
||||
// Test OnDemand option with a wildcard provided certificate
|
||||
func (s *AcmeSuite) TestOnDemandRetrieveAcmeCertificateWithWildcard(c *check.C) {
|
||||
testCase := AcmeTestCase{
|
||||
@@ -132,6 +142,19 @@ func (s *AcmeSuite) TestOnHostRuleRetrieveAcmeCertificateWithDynamicWildcard(c *
|
||||
s.retrieveAcmeCertificate(c, testCase)
|
||||
}
|
||||
|
||||
// Test Let's encrypt down
|
||||
func (s *AcmeSuite) TestNoValidLetsEncryptServer(c *check.C) {
|
||||
cmd, display := s.traefikCmd(withConfigFile("fixtures/acme/wrong_acme.toml"))
|
||||
defer display(c)
|
||||
err := cmd.Start()
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
// Expected traefik works
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
// Doing an HTTPS request and test the response certificate
|
||||
func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase AcmeTestCase) {
|
||||
file := s.adaptFile(c, testCase.traefikConfFilePath, struct {
|
||||
|
||||
@@ -336,3 +336,25 @@ func (s *SimpleSuite) TestWithUnexistingEntrypoint(c *check.C) {
|
||||
err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
func (s *SimpleSuite) TestMetricsPrometheusDefaultEntrypoint(c *check.C) {
|
||||
|
||||
s.createComposeProject(c, "base")
|
||||
s.composeProject.Start(c)
|
||||
|
||||
cmd, output := s.traefikCmd("--defaultEntryPoints=http", "--entryPoints=Name:http Address::8000", "--web", "--web.metrics.prometheus.buckets=0.1,0.3,1.2,5.0", "--docker", "--debug")
|
||||
defer output(c)
|
||||
|
||||
err := cmd.Start()
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("PathPrefix"))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8000/whoami", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
err = try.GetRequest("http://127.0.0.1:8080/metrics", 1*time.Second, try.StatusCodeIs(http.StatusOK))
|
||||
c.Assert(err, checker.IsNil)
|
||||
}
|
||||
|
||||
@@ -564,15 +564,15 @@ func (s *ConsulSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
}
|
||||
|
||||
tlsconfigure1 := map[string]string{
|
||||
"traefik/tlsconfiguration/snitestcom/entrypoints": "https",
|
||||
"traefik/tlsconfiguration/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"traefik/tlsconfiguration/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
"traefik/tls/snitestcom/entrypoints": "https",
|
||||
"traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"traefik/tls/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
}
|
||||
|
||||
tlsconfigure2 := map[string]string{
|
||||
"traefik/tlsconfiguration/snitestorg/entrypoints": "https",
|
||||
"traefik/tlsconfiguration/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"traefik/tlsconfiguration/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
"traefik/tls/snitestorg/entrypoints": "https",
|
||||
"traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
}
|
||||
|
||||
// config backends,frontends and first tls keypair
|
||||
@@ -613,7 +613,7 @@ func (s *ConsulSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for consul
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("traefik/tlsconfiguration/snitestcom/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("traefik/tls/snitestcom/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -642,7 +642,7 @@ func (s *ConsulSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for consul
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("traefik/tlsconfiguration/snitestorg/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("traefik/tls/snitestorg/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
@@ -474,15 +474,15 @@ func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
}
|
||||
|
||||
tlsconfigure1 := map[string]string{
|
||||
"/traefik/tlsconfiguration/snitestcom/entrypoints": "https",
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
"/traefik/tls/snitestcom/entrypoints": "https",
|
||||
"/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
}
|
||||
|
||||
tlsconfigure2 := map[string]string{
|
||||
"/traefik/tlsconfiguration/snitestorg/entrypoints": "https",
|
||||
"/traefik/tlsconfiguration/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"/traefik/tlsconfiguration/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
"/traefik/tls/snitestorg/entrypoints": "https",
|
||||
"/traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"/traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
}
|
||||
|
||||
// config backends,frontends and first tls keypair
|
||||
@@ -523,7 +523,7 @@ func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for etcd
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("/traefik/tlsconfiguration/snitestcom/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -557,7 +557,7 @@ func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for etcd
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("/traefik/tlsconfiguration/snitestorg/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("/traefik/tls/snitestorg/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -609,9 +609,9 @@ func (s *Etcd3Suite) TestDeleteSNIDynamicTlsConfig(c *check.C) {
|
||||
}
|
||||
|
||||
tlsconfigure1 := map[string]string{
|
||||
"/traefik/tlsconfiguration/snitestcom/entrypoints": "https",
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
"/traefik/tls/snitestcom/entrypoints": "https",
|
||||
"/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
}
|
||||
|
||||
// config backends,frontends and first tls keypair
|
||||
@@ -637,7 +637,7 @@ func (s *Etcd3Suite) TestDeleteSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for etcd
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("/traefik/tlsconfiguration/snitestcom/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
@@ -490,15 +490,15 @@ func (s *EtcdSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
}
|
||||
|
||||
tlsconfigure1 := map[string]string{
|
||||
"/traefik/tlsconfiguration/snitestcom/entrypoints": "https",
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tlsconfiguration/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
"/traefik/tls/snitestcom/entrypoints": "https",
|
||||
"/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey),
|
||||
"/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert),
|
||||
}
|
||||
|
||||
tlsconfigure2 := map[string]string{
|
||||
"/traefik/tlsconfiguration/snitestorg/entrypoints": "https",
|
||||
"/traefik/tlsconfiguration/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"/traefik/tlsconfiguration/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
"/traefik/tls/snitestorg/entrypoints": "https",
|
||||
"/traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey),
|
||||
"/traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert),
|
||||
}
|
||||
|
||||
// config backends,frontends and first tls keypair
|
||||
@@ -539,7 +539,7 @@ func (s *EtcdSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for etcd
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("/traefik/tlsconfiguration/snitestcom/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
@@ -573,7 +573,7 @@ func (s *EtcdSuite) TestSNIDynamicTlsConfig(c *check.C) {
|
||||
|
||||
// wait for etcd
|
||||
err = try.Do(60*time.Second, func() error {
|
||||
_, err := s.kv.Get("/traefik/tlsconfiguration/snitestorg/certificate/keyfile", nil)
|
||||
_, err := s.kv.Get("/traefik/tls/snitestorg/certificate/keyfile", nil)
|
||||
return err
|
||||
})
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
38
integration/fixtures/acme/acme_http01_web.toml
Normal file
@@ -0,0 +1,38 @@
|
||||
logLevel = "DEBUG"
|
||||
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":5002"
|
||||
[entryPoints.https]
|
||||
address = ":5001"
|
||||
[entryPoints.https.tls]
|
||||
|
||||
|
||||
[web]
|
||||
path="/traefik"
|
||||
|
||||
[acme]
|
||||
email = "test@traefik.io"
|
||||
storage = "/dev/null"
|
||||
entryPoint = "https"
|
||||
onDemand = {{.OnDemand}}
|
||||
OnHostRule = {{.OnHostRule}}
|
||||
caServer = "http://{{.BoulderHost}}:4000/directory"
|
||||
[acme.httpchallenge]
|
||||
entrypoint="http"
|
||||
|
||||
[file]
|
||||
|
||||
[backends]
|
||||
[backends.backend]
|
||||
[backends.backend.servers.server1]
|
||||
url = "http://127.0.0.1:9010"
|
||||
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend]
|
||||
backend = "backend"
|
||||
[frontends.frontend.routes.test]
|
||||
rule = "Host:traefik.acme.wtf"
|
||||
@@ -9,8 +9,8 @@
|
||||
[frontends.frontend.routes.test]
|
||||
rule = "Host:traefik.acme.wtf"
|
||||
|
||||
[[tlsConfiguration]]
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tlsConfiguration.certificate]
|
||||
[tls.certificate]
|
||||
certFile = "fixtures/acme/ssl/wildcard.crt"
|
||||
keyFile = "fixtures/acme/ssl/wildcard.key"
|
||||
34
integration/fixtures/acme/wrong_acme.toml
Normal file
@@ -0,0 +1,34 @@
|
||||
logLevel = "DEBUG"
|
||||
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[api]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":8081"
|
||||
[entryPoints.https]
|
||||
address = ":5001"
|
||||
[entryPoints.https.tls]
|
||||
|
||||
|
||||
[acme]
|
||||
email = "test@traefik.io"
|
||||
storage = "/dev/null"
|
||||
entryPoint = "https"
|
||||
OnHostRule = true
|
||||
caServer = "http://wrongurl:4000/directory"
|
||||
|
||||
[file]
|
||||
|
||||
[backends]
|
||||
[backends.backend]
|
||||
[backends.backend.servers.server1]
|
||||
url = "http://127.0.0.1:9010"
|
||||
|
||||
|
||||
[frontends]
|
||||
[frontends.frontend]
|
||||
backend = "backend"
|
||||
[frontends.frontend.routes.test]
|
||||
rule = "Host:traefik.acme.wtf"
|
||||
@@ -16,9 +16,9 @@
|
||||
[frontends.frontend2.routes.test_2]
|
||||
rule = "Host:snitest.org"
|
||||
|
||||
[[tlsConfiguration]]
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tlsConfiguration.certificate]
|
||||
[tls.certificate]
|
||||
certFile = """-----BEGIN CERTIFICATE-----
|
||||
MIIC/zCCAeegAwIBAgIJALAYHG/vGqWEMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV
|
||||
BAMMC3NuaXRlc3Qub3JnMB4XDTE1MTEyMzIyMDU0NFoXDTI1MTEyMDIyMDU0NFow
|
||||
|
||||
@@ -624,7 +624,7 @@ func modifyCertificateConfFileContent(c *check.C, certFileName, confFileName, en
|
||||
// If certificate file is not provided, just truncate the configuration file
|
||||
if len(certFileName) > 0 {
|
||||
tlsConf := types.Configuration{
|
||||
TLSConfiguration: []*traefikTls.Configuration{
|
||||
TLS: []*traefikTls.Configuration{
|
||||
{
|
||||
Certificate: &traefikTls.Certificate{
|
||||
CertFile: traefikTls.FileOrContent("fixtures/https/" + certFileName + ".cert"),
|
||||
|
||||
@@ -100,4 +100,3 @@ pages:
|
||||
- 'gRPC Example': 'user-guide/grpc.md'
|
||||
- 'Traefik cluster example with Swarm': 'user-guide/cluster-docker-consul.md'
|
||||
- Benchmarks: benchmarks.md
|
||||
- 'Archive': 'archive.md'
|
||||
|
||||
@@ -777,19 +777,21 @@ func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerD
|
||||
|
||||
for _, service := range serviceList {
|
||||
dockerData := parseService(service, networkMap)
|
||||
if len(dockerData.NetworkSettings.Networks) > 0 {
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
|
||||
if useSwarmLB {
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
|
||||
if useSwarmLB {
|
||||
if len(dockerData.NetworkSettings.Networks) > 0 {
|
||||
dockerDataList = append(dockerDataList, dockerData)
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dockerData, networkMap, isGlobalSvc)
|
||||
|
||||
for _, dockerDataTask := range dockerDataListTasks {
|
||||
dockerDataList = append(dockerDataList, dockerDataTask)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
isGlobalSvc := service.Spec.Mode.Global != nil
|
||||
dockerDataListTasks, err = listTasks(ctx, dockerClient, service.ID, dockerData, networkMap, isGlobalSvc)
|
||||
|
||||
for _, dockerDataTask := range dockerDataListTasks {
|
||||
dockerDataList = append(dockerDataList, dockerDataTask)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return dockerDataList, err
|
||||
@@ -805,7 +807,10 @@ func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes
|
||||
|
||||
if service.Spec.EndpointSpec != nil {
|
||||
if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeDNSRR {
|
||||
log.Warnf("Ignored endpoint-mode not supported, service name: %s", service.Spec.Annotations.Name)
|
||||
useSwarmLB, _ := strconv.ParseBool(getIsBackendLBSwarm(dockerData))
|
||||
if useSwarmLB {
|
||||
log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Træfik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name)
|
||||
}
|
||||
} else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP {
|
||||
dockerData.NetworkSettings.Networks = make(map[string]*networkData)
|
||||
for _, virtualIP := range service.Endpoint.VirtualIPs {
|
||||
|
||||
@@ -773,6 +773,7 @@ type fakeServicesClient struct {
|
||||
dockerVersion string
|
||||
networks []dockertypes.NetworkResource
|
||||
services []swarm.Service
|
||||
tasks []swarm.Task
|
||||
err error
|
||||
}
|
||||
|
||||
@@ -788,10 +789,15 @@ func (c *fakeServicesClient) NetworkList(ctx context.Context, options dockertype
|
||||
return c.networks, c.err
|
||||
}
|
||||
|
||||
func (c *fakeServicesClient) TaskList(ctx context.Context, options dockertypes.TaskListOptions) ([]swarm.Task, error) {
|
||||
return c.tasks, c.err
|
||||
}
|
||||
|
||||
func TestListServices(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
services []swarm.Service
|
||||
tasks []swarm.Task
|
||||
dockerVersion string
|
||||
networks []dockertypes.NetworkResource
|
||||
expectedServices []string
|
||||
@@ -813,7 +819,8 @@ func TestListServices(t *testing.T) {
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
@@ -838,7 +845,8 @@ func TestListServices(t *testing.T) {
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
labelDockerNetwork: "barnet",
|
||||
labelBackendLoadBalancerSwarm: "true",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
@@ -867,13 +875,65 @@ func TestListServices(t *testing.T) {
|
||||
"service1",
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Should return service1 and service2",
|
||||
services: []swarm.Service{
|
||||
swarmService(
|
||||
serviceName("service1"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
}),
|
||||
withEndpointSpec(modeVIP),
|
||||
withEndpoint(
|
||||
virtualIP("yk6l57rfwizjzxxzftn4amaot", "10.11.12.13/24"),
|
||||
virtualIP("2", "10.11.12.99/24"),
|
||||
)),
|
||||
swarmService(
|
||||
serviceName("service2"),
|
||||
serviceLabels(map[string]string{
|
||||
labelDockerNetwork: "barnet",
|
||||
}),
|
||||
withEndpointSpec(modeDNSSR)),
|
||||
},
|
||||
tasks: []swarm.Task{
|
||||
swarmTask("id1", taskStatus(taskState(swarm.TaskStateRunning))),
|
||||
swarmTask("id2", taskStatus(taskState(swarm.TaskStateRunning))),
|
||||
},
|
||||
dockerVersion: "1.30",
|
||||
networks: []dockertypes.NetworkResource{
|
||||
{
|
||||
Name: "network_name",
|
||||
ID: "yk6l57rfwizjzxxzftn4amaot",
|
||||
Created: time.Now(),
|
||||
Scope: "swarm",
|
||||
Driver: "overlay",
|
||||
EnableIPv6: false,
|
||||
Internal: true,
|
||||
Ingress: false,
|
||||
ConfigOnly: false,
|
||||
Options: map[string]string{
|
||||
"com.docker.network.driver.overlay.vxlanid_list": "4098",
|
||||
"com.docker.network.enable_ipv6": "false",
|
||||
},
|
||||
Labels: map[string]string{
|
||||
"com.docker.stack.namespace": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedServices: []string{
|
||||
"service1.0",
|
||||
"service1.0",
|
||||
"service2.0",
|
||||
"service2.0",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for caseID, test := range testCases {
|
||||
test := test
|
||||
t.Run(strconv.Itoa(caseID), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
dockerClient := &fakeServicesClient{services: test.services, dockerVersion: test.dockerVersion, networks: test.networks}
|
||||
dockerClient := &fakeServicesClient{services: test.services, tasks: test.tasks, dockerVersion: test.dockerVersion, networks: test.networks}
|
||||
serviceDockerData, _ := listServices(context.Background(), dockerClient)
|
||||
|
||||
assert.Equal(t, len(test.expectedServices), len(serviceDockerData))
|
||||
|
||||
@@ -55,12 +55,12 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s
|
||||
safe.Go(func() {
|
||||
for t := range ticker.C {
|
||||
|
||||
log.Debug("Refreshing Provider " + t.String())
|
||||
log.Debugf("Refreshing Provider %s", t.String())
|
||||
|
||||
configuration, err := p.buildConfiguration()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to refresh Provider configuration, error: %s", err)
|
||||
return
|
||||
continue
|
||||
}
|
||||
|
||||
configurationChan <- types.ConfigMessage{
|
||||
|
||||
@@ -186,7 +186,7 @@ func loadFileConfigFromDirectory(directory string, configuration *types.Configur
|
||||
}
|
||||
}
|
||||
|
||||
for _, conf := range c.TLSConfiguration {
|
||||
for _, conf := range c.TLS {
|
||||
if _, exists := configTLSMaps[conf]; exists {
|
||||
log.Warnf("TLS Configuration %v already configured, skipping", conf)
|
||||
} else {
|
||||
@@ -196,7 +196,7 @@ func loadFileConfigFromDirectory(directory string, configuration *types.Configur
|
||||
|
||||
}
|
||||
for conf := range configTLSMaps {
|
||||
configuration.TLSConfiguration = append(configuration.TLSConfiguration, conf)
|
||||
configuration.TLS = append(configuration.TLS, conf)
|
||||
}
|
||||
return configuration, nil
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestProvideSingleFileAndWatch(t *testing.T) {
|
||||
tempDir, "simple.toml",
|
||||
createFrontendConfiguration(expectedNumFrontends),
|
||||
createBackendConfiguration(expectedNumBackends),
|
||||
createTLSConfiguration(expectedNumTLSConf))
|
||||
createTLS(expectedNumTLSConf))
|
||||
|
||||
configurationChan, signal := createConfigurationRoutine(t, &expectedNumFrontends, &expectedNumBackends, &expectedNumTLSConf)
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestProvideSingleFileAndWatch(t *testing.T) {
|
||||
tempDir, "simple.toml",
|
||||
createFrontendConfiguration(expectedNumFrontends),
|
||||
createBackendConfiguration(expectedNumBackends),
|
||||
createTLSConfiguration(expectedNumTLSConf))
|
||||
createTLS(expectedNumTLSConf))
|
||||
|
||||
err = waitForSignal(signal, 2*time.Second, "single frontend, backend, TLS configuration")
|
||||
assert.NoError(t, err)
|
||||
@@ -63,7 +63,7 @@ func TestProvideSingleFileAndNotWatch(t *testing.T) {
|
||||
tempDir, "simple.toml",
|
||||
createFrontendConfiguration(expectedNumFrontends),
|
||||
createBackendConfiguration(expectedNumBackends),
|
||||
createTLSConfiguration(expectedNumTLSConf))
|
||||
createTLS(expectedNumTLSConf))
|
||||
|
||||
configurationChan, signal := createConfigurationRoutine(t, &expectedNumFrontends, &expectedNumBackends, &expectedNumTLSConf)
|
||||
|
||||
@@ -82,7 +82,7 @@ func TestProvideSingleFileAndNotWatch(t *testing.T) {
|
||||
tempDir, "simple.toml",
|
||||
createFrontendConfiguration(expectedNumFrontends),
|
||||
createBackendConfiguration(expectedNumBackends),
|
||||
createTLSConfiguration(expectedNumTLSConf))
|
||||
createTLS(expectedNumTLSConf))
|
||||
|
||||
// Must fail because we don't watch the changes
|
||||
err = waitForSignal(signal, 2*time.Second, "single frontend, backend and TLS configuration")
|
||||
@@ -99,7 +99,7 @@ func TestProvideDirectoryAndWatch(t *testing.T) {
|
||||
|
||||
tempFile1 := createRandomFile(t, tempDir, createFrontendConfiguration(expectedNumFrontends))
|
||||
tempFile2 := createRandomFile(t, tempDir, createBackendConfiguration(expectedNumBackends))
|
||||
tempFile3 := createRandomFile(t, tempDir, createTLSConfiguration(expectedNumTLSConf))
|
||||
tempFile3 := createRandomFile(t, tempDir, createTLS(expectedNumTLSConf))
|
||||
|
||||
configurationChan, signal := createConfigurationRoutine(t, &expectedNumFrontends, &expectedNumBackends, &expectedNumTLSConf)
|
||||
|
||||
@@ -145,7 +145,7 @@ func TestProvideDirectoryAndNotWatch(t *testing.T) {
|
||||
|
||||
createRandomFile(t, tempDir, createFrontendConfiguration(expectedNumFrontends))
|
||||
tempFile2 := createRandomFile(t, tempDir, createBackendConfiguration(expectedNumBackends))
|
||||
createRandomFile(t, tempTLSDir, createTLSConfiguration(expectedNumTLSConf))
|
||||
createRandomFile(t, tempTLSDir, createTLS(expectedNumTLSConf))
|
||||
|
||||
configurationChan, signal := createConfigurationRoutine(t, &expectedNumFrontends, &expectedNumBackends, &expectedNumTLSConf)
|
||||
|
||||
@@ -167,7 +167,7 @@ func TestProvideDirectoryAndNotWatch(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func createConfigurationRoutine(t *testing.T, expectedNumFrontends *int, expectedNumBackends *int, expectedNumTLSConfigurations *int) (chan types.ConfigMessage, chan interface{}) {
|
||||
func createConfigurationRoutine(t *testing.T, expectedNumFrontends *int, expectedNumBackends *int, expectedNumTLSes *int) (chan types.ConfigMessage, chan interface{}) {
|
||||
configurationChan := make(chan types.ConfigMessage)
|
||||
signal := make(chan interface{})
|
||||
|
||||
@@ -177,7 +177,7 @@ func createConfigurationRoutine(t *testing.T, expectedNumFrontends *int, expecte
|
||||
assert.Equal(t, "file", data.ProviderName)
|
||||
assert.Len(t, data.Configuration.Frontends, *expectedNumFrontends)
|
||||
assert.Len(t, data.Configuration.Backends, *expectedNumBackends)
|
||||
assert.Len(t, data.Configuration.TLSConfiguration, *expectedNumTLSConfigurations)
|
||||
assert.Len(t, data.Configuration.TLS, *expectedNumTLSes)
|
||||
signal <- nil
|
||||
}
|
||||
})
|
||||
@@ -297,13 +297,13 @@ func createBackendConfiguration(n int) string {
|
||||
return conf
|
||||
}
|
||||
|
||||
// createTLSConfiguration Helper
|
||||
func createTLSConfiguration(n int) string {
|
||||
// createTLS Helper
|
||||
func createTLS(n int) string {
|
||||
var conf string
|
||||
for i := 1; i <= n; i++ {
|
||||
conf += fmt.Sprintf(`[[TLSConfiguration]]
|
||||
conf += fmt.Sprintf(`[[TLS]]
|
||||
EntryPoints = ["https"]
|
||||
[TLSConfiguration.Certificate]
|
||||
[TLS.Certificate]
|
||||
CertFile = "integration/fixtures/https/snitest%[1]d.com.cert"
|
||||
KeyFile = "integration/fixtures/https/snitest%[1]d.com.key"
|
||||
`, i)
|
||||
|
||||
@@ -91,19 +91,24 @@ func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
checkAPI, errAPI := rancherClient.ApiKey.List(withoutPagination)
|
||||
|
||||
log.Debugf("Refreshing new Data from Provider API")
|
||||
var stacks = listRancherStacks(rancherClient)
|
||||
var services = listRancherServices(rancherClient)
|
||||
var container = listRancherContainer(rancherClient)
|
||||
if errAPI != nil {
|
||||
log.Errorf("Cannot establish connection: %+v, Rancher API return: %+v; Skipping refresh Data from Rancher API.", errAPI, checkAPI)
|
||||
} else {
|
||||
log.Debugf("Refreshing new Data from Rancher API")
|
||||
stacks := listRancherStacks(rancherClient)
|
||||
services := listRancherServices(rancherClient)
|
||||
container := listRancherContainer(rancherClient)
|
||||
|
||||
rancherData := parseAPISourcedRancherData(stacks, services, container)
|
||||
rancherData := parseAPISourcedRancherData(stacks, services, container)
|
||||
|
||||
configuration := p.loadRancherConfig(rancherData)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
configuration := p.loadRancherConfig(rancherData)
|
||||
if configuration != nil {
|
||||
configurationChan <- types.ConfigMessage{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-stop:
|
||||
|
||||
@@ -28,14 +28,6 @@ echo $VERSION | git commit --file -
|
||||
echo $VERSION | git tag -a $VERSION --file -
|
||||
git push -q --follow-tags -u origin master > /dev/null 2>&1
|
||||
|
||||
# create docker image emilevauge/traefik (compatibility)
|
||||
echo "Updating docker emilevauge/traefik image..."
|
||||
docker login -u $DOCKER_USER -p $DOCKER_PASS
|
||||
docker tag containous/traefik emilevauge/traefik:latest
|
||||
docker push emilevauge/traefik:latest
|
||||
docker tag emilevauge/traefik:latest emilevauge/traefik:${VERSION}
|
||||
docker push emilevauge/traefik:${VERSION}
|
||||
|
||||
cd ..
|
||||
rm -Rf traefik-library-image/
|
||||
|
||||
|
||||
@@ -6,8 +6,6 @@ set -o nounset

echo "Prune dependencies"

dep prune

find vendor -name '*_test.go' -exec rm {} \;

find vendor -type f \( ! -iname 'licen[cs]e*' \
@@ -32,3 +30,28 @@ find vendor -type f \( ! -iname 'licen[cs]e*' \
-a ! -iname '*.hpp' \
-a ! -iname '*.hxx' \
-a ! -iname '*.s' \) -exec rm -f {} +

find -type d \( -iname '*Godeps*' \) -exec rm -rf {} +

find vendor -type l \( ! -iname 'licen[cs]e*' \
-a ! -iname '*notice*' \
-a ! -iname '*patent*' \
-a ! -iname '*copying*' \
-a ! -iname '*unlicense*' \
-a ! -iname '*copyright*' \
-a ! -iname '*copyleft*' \
-a ! -iname '*legal*' \
-a ! -iname 'disclaimer*' \
-a ! -iname 'third-party*' \
-a ! -iname 'thirdparty*' \
-a ! -iname '*.go' \
-a ! -iname '*.c' \
-a ! -iname '*.S' \
-a ! -iname '*.cc' \
-a ! -iname '*.cpp' \
-a ! -iname '*.cxx' \
-a ! -iname '*.h' \
-a ! -iname '*.hh' \
-a ! -iname '*.hpp' \
-a ! -iname '*.hxx' \
-a ! -iname '*.s' \) -exec rm -f {} +
@@ -4,7 +4,7 @@ set -e
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export DEST=.

TESTFLAGS="${TESTFLAGS} -test.timeout=9m -check.v"
TESTFLAGS="${TESTFLAGS} -test.timeout=20m -check.v"

if [ -n "$VERBOSE" ]; then
TESTFLAGS="${TESTFLAGS} -v"
@@ -66,6 +66,7 @@ type Server struct {
stopChan chan bool
providers []provider.Provider
currentConfigurations safe.Safe
providerConfigUpdateMap map[string]chan types.ConfigMessage
globalConfiguration configuration.GlobalConfiguration
accessLoggerMiddleware *accesslog.LogHandler
routinesPool *safe.Pool
@@ -105,6 +106,7 @@ func NewServer(globalConfiguration configuration.GlobalConfiguration) *Server {
server.configureSignals()
currentConfigurations := make(types.Configurations)
server.currentConfigurations.Set(currentConfigurations)
server.providerConfigUpdateMap = make(map[string]chan types.ConfigMessage)
server.globalConfiguration = globalConfiguration
if server.globalConfiguration.API != nil {
server.globalConfiguration.API.CurrentConfigurations = &server.currentConfigurations
@@ -350,25 +352,25 @@ func (s *Server) listenProviders(stop chan bool) {
}

func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
providerConfigUpdateMap := map[string]chan types.ConfigMessage{}
providersThrottleDuration := time.Duration(s.globalConfiguration.ProvidersThrottleDuration)
s.defaultConfigurationValues(configMsg.Configuration)
currentConfigurations := s.currentConfigurations.Get().(types.Configurations)
jsonConf, _ := json.Marshal(configMsg.Configuration)
log.Debugf("Configuration received from provider %s: %s", configMsg.ProviderName, string(jsonConf))
if configMsg.Configuration == nil || configMsg.Configuration.Backends == nil && configMsg.Configuration.Frontends == nil && configMsg.Configuration.TLSConfiguration == nil {
if configMsg.Configuration == nil || configMsg.Configuration.Backends == nil && configMsg.Configuration.Frontends == nil && configMsg.Configuration.TLS == nil {
log.Infof("Skipping empty Configuration for provider %s", configMsg.ProviderName)
} else if reflect.DeepEqual(currentConfigurations[configMsg.ProviderName], configMsg.Configuration) {
log.Infof("Skipping same configuration for provider %s", configMsg.ProviderName)
} else {
if _, ok := providerConfigUpdateMap[configMsg.ProviderName]; !ok {
providerConfigUpdate := make(chan types.ConfigMessage)
providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdate
providerConfigUpdateCh, ok := s.providerConfigUpdateMap[configMsg.ProviderName]
if !ok {
providerConfigUpdateCh = make(chan types.ConfigMessage)
s.providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdateCh
s.routinesPool.Go(func(stop chan bool) {
throttleProviderConfigReload(providersThrottleDuration, s.configurationValidatedChan, providerConfigUpdate, stop)
throttleProviderConfigReload(providersThrottleDuration, s.configurationValidatedChan, providerConfigUpdateCh, stop)
})
}
providerConfigUpdateMap[configMsg.ProviderName] <- configMsg
providerConfigUpdateCh <- configMsg
}
}
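The change above moves the provider-to-channel map from a local variable onto the `Server` struct, so each provider gets exactly one throttling goroutine across calls instead of a fresh map (and fresh goroutines) for every configuration message. A minimal, self-contained sketch of that lazy one-worker-per-key pattern (names and the mutex are illustrative, not Traefik's code):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// dispatcher lazily creates one worker goroutine per key and reuses it,
// mirroring how preLoadConfiguration now reuses s.providerConfigUpdateMap.
type dispatcher struct {
	mu      sync.Mutex
	workers map[string]chan string
}

func (d *dispatcher) send(key, msg string) {
	d.mu.Lock()
	ch, ok := d.workers[key]
	if !ok {
		ch = make(chan string)
		d.workers[key] = ch
		go func() { // started once per key, like throttleProviderConfigReload
			for m := range ch {
				fmt.Printf("[%s] %s\n", key, m)
				time.Sleep(10 * time.Millisecond) // stands in for the throttle duration
			}
		}()
	}
	d.mu.Unlock()
	ch <- msg
}

func main() {
	d := &dispatcher{workers: make(map[string]chan string)}
	d.send("rancher", "config v1")
	d.send("rancher", "config v2") // reuses the same worker
	time.Sleep(100 * time.Millisecond)
}
```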
@@ -460,8 +462,8 @@ func (s *Server) loadHTTPSConfiguration(configurations types.Configurations) (ma
newEPCertificates := make(map[string]*traefikTls.DomainsCertificates)
// Get all certificates
for _, configuration := range configurations {
if configuration.TLSConfiguration != nil && len(configuration.TLSConfiguration) > 0 {
if err := traefikTls.SortTLSConfigurationPerEntryPoints(configuration.TLSConfiguration, newEPCertificates); err != nil {
if configuration.TLS != nil && len(configuration.TLS) > 0 {
if err := traefikTls.SortTLSPerEntryPoints(configuration.TLS, newEPCertificates); err != nil {
return nil, err
}
}
@@ -671,31 +673,27 @@ func (s *Server) createTLSConfig(entryPointName string, tlsOption *traefikTls.TL
}

if s.globalConfiguration.ACME != nil {
if _, ok := s.serverEntryPoints[s.globalConfiguration.ACME.EntryPoint]; ok {
if entryPointName == s.globalConfiguration.ACME.EntryPoint {
checkOnDemandDomain := func(domain string) bool {
routeMatch := &mux.RouteMatch{}
router := router.GetHandler()
match := router.Match(&http.Request{URL: &url.URL{}, Host: domain}, routeMatch)
if match && routeMatch.Route != nil {
return true
}
return false
if entryPointName == s.globalConfiguration.ACME.EntryPoint {
checkOnDemandDomain := func(domain string) bool {
routeMatch := &mux.RouteMatch{}
router := router.GetHandler()
match := router.Match(&http.Request{URL: &url.URL{}, Host: domain}, routeMatch)
if match && routeMatch.Route != nil {
return true
}
if s.leadership == nil {
err := s.globalConfiguration.ACME.CreateLocalConfig(config, &s.serverEntryPoints[entryPointName].certs, checkOnDemandDomain)
if err != nil {
return nil, err
}
} else {
err := s.globalConfiguration.ACME.CreateClusterConfig(s.leadership, config, &s.serverEntryPoints[entryPointName].certs, checkOnDemandDomain)
if err != nil {
return nil, err
}
return false
}
if s.leadership == nil {
err := s.globalConfiguration.ACME.CreateLocalConfig(config, &s.serverEntryPoints[entryPointName].certs, checkOnDemandDomain)
if err != nil {
return nil, err
}
} else {
err := s.globalConfiguration.ACME.CreateClusterConfig(s.leadership, config, &s.serverEntryPoints[entryPointName].certs, checkOnDemandDomain)
if err != nil {
return nil, err
}
}
} else {
return nil, errors.New("Unknown entrypoint " + s.globalConfiguration.ACME.EntryPoint + " for ACME configuration")
}
} else {
config.GetCertificate = s.serverEntryPoints[entryPointName].getCertificate
@@ -758,7 +756,9 @@ func (s *Server) addInternalPublicRoutes(entryPointName string, router *mux.Rout
if s.globalConfiguration.Ping != nil && s.globalConfiguration.Ping.EntryPoint != "" && s.globalConfiguration.Ping.EntryPoint == entryPointName {
s.globalConfiguration.Ping.AddRoutes(router)
}
}

func (s *Server) addACMERoutes(entryPointName string, router *mux.Router) {
if s.globalConfiguration.ACME != nil && s.globalConfiguration.ACME.HTTPChallenge != nil && s.globalConfiguration.ACME.HTTPChallenge.EntryPoint == entryPointName {
s.globalConfiguration.ACME.AddRoutes(router)
}
@@ -839,6 +839,9 @@ func (s *Server) buildInternalRouter(entryPointName, path string, internalMiddle
internalMuxRouter.Walk(wrapRoute(internalMiddlewares))

s.addInternalPublicRoutes(entryPointName, internalMuxSubrouter)

s.addACMERoutes(entryPointName, internalMuxRouter)

return internalMuxRouter
}
@@ -918,23 +921,23 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura

log.Debugf("Creating frontend %s", frontendName)

var frontendEntryPoints []string
for _, entryPointName := range frontend.EntryPoints {
if _, ok := serverEntryPoints[entryPointName]; !ok {
log.Errorf("Undefined entrypoint '%s' for frontend %s", entryPointName, frontendName)
} else {
frontendEntryPoints = append(frontendEntryPoints, entryPointName)
}
}
frontend.EntryPoints = frontendEntryPoints

if len(frontend.EntryPoints) == 0 {
log.Errorf("No entrypoint defined for frontend %s, defaultEntryPoints:%s", frontendName, globalConfiguration.DefaultEntryPoints)
log.Errorf("No entrypoint defined for frontend %s", frontendName)
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
var failedEntrypoints int
for _, entryPointName := range frontend.EntryPoints {
log.Debugf("Wiring frontend %s to entryPoint %s", frontendName, entryPointName)
if _, ok := serverEntryPoints[entryPointName]; !ok {
log.Errorf("Undefined entrypoint '%s' for frontend %s", entryPointName, frontendName)
failedEntrypoints++
if failedEntrypoints == len(frontend.EntryPoints) {
log.Errorf("Skipping frontend %s...", frontendName)
continue frontend
}
continue
}

newServerRoute := &serverRoute{route: serverEntryPoints[entryPointName].httpRouter.GetHandler().NewRoute().Name(frontendName)}
for routeName, route := range frontend.Routes {
@@ -463,7 +463,7 @@ func TestServerLoadConfigHealthCheckOptions(t *testing.T) {
HealthCheck: healthCheck,
},
},
TLSConfiguration: []*tls.Configuration{
TLS: []*tls.Configuration{
{
Certificate: &tls.Certificate{
CertFile: localhostCert,
@@ -644,7 +644,7 @@ func TestServerLoadConfigEmptyBasicAuth(t *testing.T) {
},
},
},
TLSConfiguration: []*tls.Configuration{
TLS: []*tls.Configuration{
{
Certificate: &tls.Certificate{
CertFile: localhostCert,
@@ -37,9 +37,9 @@

{{if $frontend.Redirect}}
[frontends."{{$frontendName}}".redirect]
entryPoint = "{{$frontend.RedirectEntryPoint}}"
regex = "{{$frontend.RedirectRegex}}"
replacement = "{{$frontend.RedirectReplacement}}"
entryPoint = "{{$frontend.Redirect.EntryPoint}}"
regex = "{{$frontend.Redirect.Regex}}"
replacement = "{{$frontend.Redirect.Replacement}}"
{{end}}

{{ if $frontend.Headers }}
@@ -1,6 +1,6 @@
{{$frontends := List .Prefix "/frontends/" }}
{{$backends := List .Prefix "/backends/"}}
{{$tlsconfiguration := List .Prefix "/tlsconfiguration/"}}
{{$tls := List .Prefix "/tls/"}}

[backends]{{range $backends}}
{{$backend := .}}
@@ -65,13 +65,13 @@
{{end}}
{{end}}

{{range $tlsconfiguration}}
{{range $tls}}
{{$entryPoints := SplitGet . "/entrypoints"}}
[[tlsConfiguration]]
[[tls]]
entryPoints = [{{range $entryPoints}}
"{{.}}",
{{end}}]
[tlsConfiguration.certificate]
[tls.certificate]
certFile = """{{Get "" . "/certificate" "/certfile"}}"""
keyFile = """{{Get "" . "/certificate" "/keyfile"}}"""
{{end}}
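With the rename, a KV backend now feeds TLS certificates to this template under `/tls/` instead of `/tlsconfiguration/`. A rough sketch of the key layout the renamed template reads (the `traefik` prefix, index values, and file paths are illustrative):

```go
package main

import "fmt"

func main() {
	// Keys as the renamed template expects them: List .Prefix "/tls/",
	// then /entrypoints and /certificate/{certfile,keyfile} per entry.
	kvPairs := map[string]string{
		"traefik/tls/0/entrypoints/0":        "https",
		"traefik/tls/0/certificate/certfile": "/certs/example.org.cert",
		"traefik/tls/0/certificate/keyfile":  "/certs/example.org.key",
	}
	for k, v := range kvPairs {
		fmt.Println(k, "=", v)
	}
}
```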
@@ -144,7 +144,12 @@ func (c *Certificate) AppendCertificates(certs map[string]*DomainsCertificates,
certKey := parsedCert.Subject.CommonName
if parsedCert.DNSNames != nil {
sort.Strings(parsedCert.DNSNames)
certKey += fmt.Sprintf("%s,%s", parsedCert.Subject.CommonName, strings.Join(parsedCert.DNSNames, ","))
for _, dnsName := range parsedCert.DNSNames {
if dnsName != parsedCert.Subject.CommonName {
certKey += fmt.Sprintf(",%s", dnsName)
}
}

}

certExists := false
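The new key construction starts from the certificate's CommonName and appends each SAN except one that repeats the CommonName, instead of concatenating the CN plus the full SAN list. A small standalone illustration of the resulting key (the domain names are made up):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	commonName := "snitest.com"
	dnsNames := []string{"www.snitest.com", "snitest.com", "api.snitest.com"}

	// Mirrors the fixed logic: sort the SANs, then append every name that
	// is not already the CommonName, so the key has no duplicate entry.
	certKey := commonName
	sort.Strings(dnsNames)
	for _, dnsName := range dnsNames {
		if dnsName != commonName {
			certKey += fmt.Sprintf(",%s", dnsName)
		}
	}
	fmt.Println(certKey) // snitest.com,api.snitest.com,www.snitest.com
}
```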
@@ -86,8 +86,8 @@ func (r *RootCAs) Type() string {
return "rootcas"
}

// SortTLSConfigurationPerEntryPoints converts TLS configuration sorted by Certificates into TLS configuration sorted by EntryPoints
func SortTLSConfigurationPerEntryPoints(configurations []*Configuration, epConfiguration map[string]*DomainsCertificates) error {
// SortTLSPerEntryPoints converts TLS configuration sorted by Certificates into TLS configuration sorted by EntryPoints
func SortTLSPerEntryPoints(configurations []*Configuration, epConfiguration map[string]*DomainsCertificates) error {
if epConfiguration == nil {
epConfiguration = make(map[string]*DomainsCertificates)
}
@@ -197,9 +197,9 @@ type Configurations map[string]*Configuration

// Configuration of a provider.
type Configuration struct {
Backends map[string]*Backend `json:"backends,omitempty"`
Frontends map[string]*Frontend `json:"frontends,omitempty"`
TLSConfiguration []*traefikTls.Configuration `json:"tlsConfiguration,omitempty"`
Backends map[string]*Backend `json:"backends,omitempty"`
Frontends map[string]*Frontend `json:"frontends,omitempty"`
TLS []*traefikTls.Configuration `json:"tls,omitempty"`
}

// ConfigMessage hold configuration information exchanged between parts of traefik.
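The struct rename means dynamic configuration now serializes its certificates under a `tls` key rather than `tlsConfiguration`. A self-contained sketch with stand-in types (not Traefik's real ones) just to show the JSON shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types: only the renamed field's JSON tag mirrors the change above.
type certificate struct {
	CertFile string `json:"certFile"`
	KeyFile  string `json:"keyFile"`
}

type tlsConfiguration struct {
	EntryPoints []string     `json:"entryPoints"`
	Certificate *certificate `json:"certificate"`
}

type configuration struct {
	TLS []*tlsConfiguration `json:"tls,omitempty"` // was "tlsConfiguration" before this change
}

func main() {
	c := configuration{TLS: []*tlsConfiguration{{
		EntryPoints: []string{"https"},
		Certificate: &certificate{CertFile: "example.cert", KeyFile: "example.key"},
	}}}
	out, _ := json.MarshalIndent(c, "", "  ")
	fmt.Println(string(out))
}
```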
20   vendor/cloud.google.com/go/cloud.go   generated   vendored
@@ -1,20 +0,0 @@ (package documentation file removed from vendor)
14   vendor/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING   generated   vendored   Normal file
@@ -0,0 +1,14 @@ (WTFPL license text added)
14   vendor/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING   generated   vendored   Normal file
@@ -0,0 +1,14 @@ (same WTFPL license text added)
14   vendor/github.com/BurntSushi/toml/cmd/tomlv/COPYING   generated   vendored   Normal file
@@ -0,0 +1,14 @@ (same WTFPL license text added)
13   vendor/github.com/JamesClonk/vultr/vultr.go   generated   vendored
@@ -1,13 +0,0 @@ (command entry point removed from vendor)
27   vendor/github.com/Microsoft/go-winio/archive/tar/LICENSE   generated   vendored   Normal file
@@ -0,0 +1,27 @@ (BSD-style Go Authors license text added)
2   vendor/github.com/NYTimes/gziphandler/gzip.go   generated   vendored
@@ -88,7 +88,7 @@ type GzipResponseWriterWithCloseNotify struct {
*GzipResponseWriter
}

func (w *GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
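The vendored gziphandler fix switches `CloseNotify` from a pointer receiver to a value receiver, so a plain `GzipResponseWriterWithCloseNotify` value (not only a pointer to it) satisfies `http.CloseNotifier`. A minimal sketch of the difference, using toy types rather than the real ones:

```go
package main

import "fmt"

type notifier interface{ CloseNotify() <-chan bool }

type withPointerReceiver struct{}

func (w *withPointerReceiver) CloseNotify() <-chan bool { return nil }

type withValueReceiver struct{}

func (w withValueReceiver) CloseNotify() <-chan bool { return nil }

func main() {
	var x interface{} = withPointerReceiver{}
	_, ok := x.(notifier)
	fmt.Println(ok) // false: the value's method set lacks the pointer-receiver method

	var y interface{} = withValueReceiver{}
	_, ok = y.(notifier)
	fmt.Println(ok) // true: a value receiver puts CloseNotify in the value's method set
}
```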
7   vendor/github.com/aws/aws-sdk-go/sdk.go   generated   vendored
@@ -1,7 +0,0 @@
// Package sdk is the official AWS SDK for the Go programming language.
//
// See our Developer Guide for information for on getting started and using
// the SDK.
//
// https://github.com/aws/aws-sdk-go/wiki
package sdk
5   vendor/github.com/aws/aws-sdk-go/service/generate.go   generated   vendored
@@ -1,5 +0,0 @@
// Package service contains automatically generated AWS clients.
package service

//go:generate go run -tags codegen ../private/model/cli/gen-api/main.go -path=../service ../models/apis/*/*/api-2.json
//go:generate gofmt -s -w ../service
47   vendor/github.com/containous/staert/kv.go   generated   vendored
@@ -1,10 +1,14 @@
package staert

import (
"bytes"
"compress/gzip"
"encoding"
"encoding/base64"
"errors"
"fmt"
"io"
"io/ioutil"
"reflect"
"sort"
"strconv"
@@ -155,16 +159,32 @@ func decodeHook(fromType reflect.Type, toType reflect.Type, data interface{}) (i

return dataOutput, nil
} else if fromType.Kind() == reflect.String {
b, err := base64.StdEncoding.DecodeString(data.(string))
if err != nil {
return nil, err
}
return b, nil
return readCompressedData(data.(string), gzipReader, base64Reader)
}
}
return data, nil
}

func readCompressedData(data string, fs ...func(io.Reader) (io.Reader, error)) ([]byte, error) {
var err error
for _, f := range fs {
var reader io.Reader
reader, err = f(bytes.NewBufferString(data))
if err == nil {
return ioutil.ReadAll(reader)
}
}
return nil, err
}

func base64Reader(r io.Reader) (io.Reader, error) {
return base64.NewDecoder(base64.StdEncoding, r), nil
}

func gzipReader(r io.Reader) (io.Reader, error) {
return gzip.NewReader(r)
}

// StoreConfig stores the config into the KV Store
func (kv *KvSource) StoreConfig(config interface{}) error {
kvMap := map[string]string{}
@@ -263,7 +283,11 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
case reflect.Array, reflect.Slice:
// Byte slices get special treatment
if objValue.Type().Elem().Kind() == reflect.Uint8 {
kv[name] = base64.StdEncoding.EncodeToString(objValue.Bytes())
compressedData, err := writeCompressedData(objValue.Bytes())
if err != nil {
return err
}
kv[name] = compressedData
} else {
for i := 0; i < objValue.Len(); i++ {
name = key + "/" + strconv.Itoa(i)
@@ -286,6 +310,17 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string
return nil
}

func writeCompressedData(data []byte) (string, error) {
var buffer bytes.Buffer
gzipWriter := gzip.NewWriter(&buffer)
_, err := gzipWriter.Write(data)
if err != nil {
return "", err
}
gzipWriter.Close()
return buffer.String(), nil
}

// ListRecursive lists all key value children under key
func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
pairsN1, err := kv.List(key, nil)
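With this vendored change, byte-slice values are stored gzip-compressed, while reads try gzip first and fall back to base64 so entries written by older versions still load. A small round-trip sketch using only the standard library:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

func main() {
	payload := []byte("certificate bytes")

	// New write path: gzip-compress the byte slice before storing it.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(payload)
	zw.Close()
	stored := buf.String()

	// Read path, first attempt: gzip. Succeeds for newly written values.
	if zr, err := gzip.NewReader(bytes.NewBufferString(stored)); err == nil {
		out, _ := ioutil.ReadAll(zr)
		fmt.Println("gzip value:", string(out))
	}

	// Legacy value written by the old code path: plain base64.
	legacy := base64.StdEncoding.EncodeToString(payload)
	if _, err := gzip.NewReader(bytes.NewBufferString(legacy)); err != nil {
		// gzip rejects it (no magic header), so fall back to base64, as the helper does.
		out, _ := ioutil.ReadAll(base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(legacy)))
		fmt.Println("base64 value:", string(out))
	}
}
```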
16
vendor/github.com/coreos/etcd/auth/doc.go
generated
vendored
16
vendor/github.com/coreos/etcd/auth/doc.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package auth provides client role authentication for accessing keys in etcd.
|
||||
package auth
|
||||
137
vendor/github.com/coreos/etcd/auth/jwt.go
generated
vendored
137
vendor/github.com/coreos/etcd/auth/jwt.go
generated
vendored
@@ -1,137 +0,0 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"io/ioutil"
|
||||
|
||||
jwt "github.com/dgrijalva/jwt-go"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type tokenJWT struct {
|
||||
signMethod string
|
||||
signKey *rsa.PrivateKey
|
||||
verifyKey *rsa.PublicKey
|
||||
}
|
||||
|
||||
func (t *tokenJWT) enable() {}
|
||||
func (t *tokenJWT) disable() {}
|
||||
func (t *tokenJWT) invalidateUser(string) {}
|
||||
func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
|
||||
|
||||
func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
|
||||
// rev isn't used in JWT, it is only used in simple token
|
||||
var (
|
||||
username string
|
||||
revision uint64
|
||||
)
|
||||
|
||||
parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
|
||||
return t.verifyKey, nil
|
||||
})
|
||||
|
||||
switch err.(type) {
|
||||
case nil:
|
||||
if !parsed.Valid {
|
||||
plog.Warningf("invalid jwt token: %s", token)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
claims := parsed.Claims.(jwt.MapClaims)
|
||||
|
||||
username = claims["username"].(string)
|
||||
revision = uint64(claims["revision"].(float64))
|
||||
default:
|
||||
plog.Warningf("failed to parse jwt token: %s", err)
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &AuthInfo{Username: username, Revision: revision}, true
|
||||
}
|
||||
|
||||
func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
|
||||
// Future work: let a jwt token include permission information would be useful for
|
||||
// permission checking in proxy side.
|
||||
tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
|
||||
jwt.MapClaims{
|
||||
"username": username,
|
||||
"revision": revision,
|
||||
})
|
||||
|
||||
token, err := tk.SignedString(t.signKey)
|
||||
if err != nil {
|
||||
plog.Debugf("failed to sign jwt token: %s", err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
plog.Debugf("jwt token: %s", token)
|
||||
|
||||
return token, err
|
||||
}
|
||||
|
||||
func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
|
||||
for k, v := range opts {
|
||||
switch k {
|
||||
case "sign-method":
|
||||
jwtSignMethod = v
|
||||
case "pub-key":
|
||||
jwtPubKeyPath = v
|
||||
case "priv-key":
|
||||
jwtPrivKeyPath = v
|
||||
default:
|
||||
plog.Errorf("unknown token specific option: %s", k)
|
||||
return "", "", "", ErrInvalidAuthOpts
|
||||
}
|
||||
}
|
||||
|
||||
return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
|
||||
}
|
||||
|
||||
func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
|
||||
jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
|
||||
if err != nil {
|
||||
return nil, ErrInvalidAuthOpts
|
||||
}
|
||||
|
||||
t := &tokenJWT{}
|
||||
|
||||
t.signMethod = jwtSignMethod
|
||||
|
||||
verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
|
||||
if err != nil {
|
||||
plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
133
vendor/github.com/coreos/etcd/auth/range_perm_cache.go
generated
vendored
133
vendor/github.com/coreos/etcd/auth/range_perm_cache.go
generated
vendored
@@ -1,133 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/auth/authpb"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/pkg/adt"
|
||||
)
|
||||
|
||||
func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
|
||||
user := getUser(tx, userName)
|
||||
if user == nil {
|
||||
plog.Errorf("invalid user name %s", userName)
|
||||
return nil
|
||||
}
|
||||
|
||||
readPerms := &adt.IntervalTree{}
|
||||
writePerms := &adt.IntervalTree{}
|
||||
|
||||
for _, roleName := range user.Roles {
|
||||
role := getRole(tx, roleName)
|
||||
if role == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, perm := range role.KeyPermission {
|
||||
var ivl adt.Interval
|
||||
var rangeEnd []byte
|
||||
|
||||
if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
|
||||
rangeEnd = perm.RangeEnd
|
||||
}
|
||||
|
||||
if len(perm.RangeEnd) != 0 {
|
||||
ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
|
||||
} else {
|
||||
ivl = adt.NewBytesAffinePoint(perm.Key)
|
||||
}
|
||||
|
||||
switch perm.PermType {
|
||||
case authpb.READWRITE:
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.READ:
|
||||
readPerms.Insert(ivl, struct{}{})
|
||||
|
||||
case authpb.WRITE:
|
||||
writePerms.Insert(ivl, struct{}{})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &unifiedRangePermissions{
|
||||
readPerms: readPerms,
|
||||
writePerms: writePerms,
|
||||
}
|
||||
}
|
||||
|
||||
func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
|
||||
rangeEnd = nil
|
||||
}
|
||||
|
||||
ivl := adt.NewBytesAffineInterval(key, rangeEnd)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
return cachedPerms.readPerms.Contains(ivl)
|
||||
case authpb.WRITE:
|
||||
return cachedPerms.writePerms.Contains(ivl)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
|
||||
pt := adt.NewBytesAffinePoint(key)
|
||||
switch permtyp {
|
||||
case authpb.READ:
|
||||
return cachedPerms.readPerms.Intersects(pt)
|
||||
case authpb.WRITE:
|
||||
return cachedPerms.writePerms.Intersects(pt)
|
||||
default:
|
||||
plog.Panicf("unknown auth type: %v", permtyp)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
|
||||
// assumption: tx is Lock()ed
|
||||
_, ok := as.rangePermCache[userName]
|
||||
if !ok {
|
||||
perms := getMergedPerms(tx, userName)
|
||||
if perms == nil {
|
||||
plog.Errorf("failed to create a unified permission of user %s", userName)
|
||||
return false
|
||||
}
|
||||
as.rangePermCache[userName] = perms
|
||||
}
|
||||
|
||||
if len(rangeEnd) == 0 {
|
||||
return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
|
||||
}
|
||||
|
||||
return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
|
||||
}
|
||||
|
||||
func (as *authStore) clearCachedPerm() {
|
||||
as.rangePermCache = make(map[string]*unifiedRangePermissions)
|
||||
}
|
||||
|
||||
func (as *authStore) invalidateCachedPerm(userName string) {
|
||||
delete(as.rangePermCache, userName)
|
||||
}
|
||||
|
||||
type unifiedRangePermissions struct {
|
||||
readPerms *adt.IntervalTree
|
||||
writePerms *adt.IntervalTree
|
||||
}
|
||||
220
vendor/github.com/coreos/etcd/auth/simple_token.go
generated
vendored
220
vendor/github.com/coreos/etcd/auth/simple_token.go
generated
vendored
@@ -1,220 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package auth
|
||||
|
||||
// CAUTION: This randum number based token mechanism is only for testing purpose.
|
||||
// JWT based mechanism will be added in the near future.
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||
defaultSimpleTokenLength = 16
|
||||
)
|
||||
|
||||
// var for testing purposes
|
||||
var (
|
||||
simpleTokenTTL = 5 * time.Minute
|
||||
simpleTokenTTLResolution = 1 * time.Second
|
||||
)
|
||||
|
||||
type simpleTokenTTLKeeper struct {
|
||||
tokens map[string]time.Time
|
||||
donec chan struct{}
|
||||
stopc chan struct{}
|
||||
deleteTokenFunc func(string)
|
||||
mu *sync.Mutex
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) stop() {
|
||||
select {
|
||||
case tm.stopc <- struct{}{}:
|
||||
case <-tm.donec:
|
||||
}
|
||||
<-tm.donec
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
|
||||
if _, ok := tm.tokens[token]; ok {
|
||||
tm.tokens[token] = time.Now().Add(simpleTokenTTL)
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
|
||||
delete(tm.tokens, token)
|
||||
}
|
||||
|
||||
func (tm *simpleTokenTTLKeeper) run() {
|
||||
tokenTicker := time.NewTicker(simpleTokenTTLResolution)
|
||||
defer func() {
|
||||
tokenTicker.Stop()
|
||||
close(tm.donec)
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case <-tokenTicker.C:
|
||||
nowtime := time.Now()
|
||||
tm.mu.Lock()
|
||||
for t, tokenendtime := range tm.tokens {
|
||||
if nowtime.After(tokenendtime) {
|
||||
tm.deleteTokenFunc(t)
|
||||
delete(tm.tokens, t)
|
||||
}
|
||||
}
|
||||
tm.mu.Unlock()
|
||||
case <-tm.stopc:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type tokenSimple struct {
|
||||
indexWaiter func(uint64) <-chan struct{}
|
||||
simpleTokenKeeper *simpleTokenTTLKeeper
|
||||
simpleTokensMu sync.Mutex
|
||||
simpleTokens map[string]string // token -> username
|
||||
}
|
||||
|
||||
func (t *tokenSimple) genTokenPrefix() (string, error) {
|
||||
ret := make([]byte, defaultSimpleTokenLength)
|
||||
|
||||
for i := 0; i < defaultSimpleTokenLength; i++ {
|
||||
bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ret[i] = letters[bInt.Int64()]
|
||||
}
|
||||
|
||||
return string(ret), nil
|
||||
}
|
||||
|
||||
func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
|
||||
t.simpleTokensMu.Lock()
|
||||
_, ok := t.simpleTokens[token]
|
||||
if ok {
|
||||
plog.Panicf("token %s is alredy used", token)
|
||||
}
|
||||
|
||||
t.simpleTokens[token] = username
|
||||
t.simpleTokenKeeper.addSimpleToken(token)
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) invalidateUser(username string) {
|
||||
if t.simpleTokenKeeper == nil {
|
||||
return
|
||||
}
|
||||
t.simpleTokensMu.Lock()
|
||||
for token, name := range t.simpleTokens {
|
||||
if strings.Compare(name, username) == 0 {
|
||||
delete(t.simpleTokens, token)
|
||||
t.simpleTokenKeeper.deleteSimpleToken(token)
|
||||
}
|
||||
}
|
||||
t.simpleTokensMu.Unlock()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) enable() {
|
||||
delf := func(tk string) {
|
||||
if username, ok := t.simpleTokens[tk]; ok {
|
||||
plog.Infof("deleting token %s for user %s", tk, username)
|
||||
delete(t.simpleTokens, tk)
|
||||
}
|
||||
}
|
||||
t.simpleTokenKeeper = &simpleTokenTTLKeeper{
|
||||
tokens: make(map[string]time.Time),
|
||||
donec: make(chan struct{}),
|
||||
stopc: make(chan struct{}),
|
||||
deleteTokenFunc: delf,
|
||||
mu: &t.simpleTokensMu,
|
||||
}
|
||||
go t.simpleTokenKeeper.run()
|
||||
}
|
||||
|
||||
func (t *tokenSimple) disable() {
|
||||
t.simpleTokensMu.Lock()
|
||||
tk := t.simpleTokenKeeper
|
||||
t.simpleTokenKeeper = nil
|
||||
t.simpleTokens = make(map[string]string) // invalidate all tokens
|
||||
t.simpleTokensMu.Unlock()
|
||||
if tk != nil {
|
||||
tk.stop()
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
|
||||
if !t.isValidSimpleToken(ctx, token) {
|
||||
return nil, false
|
||||
}
|
||||
t.simpleTokensMu.Lock()
|
||||
username, ok := t.simpleTokens[token]
|
||||
if ok && t.simpleTokenKeeper != nil {
|
||||
t.simpleTokenKeeper.resetSimpleToken(token)
|
||||
}
|
||||
t.simpleTokensMu.Unlock()
|
||||
return &AuthInfo{Username: username, Revision: revision}, ok
|
||||
}
|
||||
|
||||
func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
|
||||
// rev isn't used in simple token, it is only used in JWT
|
||||
index := ctx.Value("index").(uint64)
|
||||
simpleToken := ctx.Value("simpleToken").(string)
|
||||
token := fmt.Sprintf("%s.%d", simpleToken, index)
|
||||
t.assignSimpleTokenToUser(username, token)
|
||||
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
|
||||
splitted := strings.Split(token, ".")
|
||||
if len(splitted) != 2 {
|
||||
return false
|
||||
}
|
||||
index, err := strconv.Atoi(splitted[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case <-t.indexWaiter(uint64(index)):
|
||||
return true
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple {
|
||||
return &tokenSimple{
|
||||
simpleTokens: make(map[string]string),
|
||||
indexWaiter: indexWaiter,
|
||||
}
|
||||
}
|
||||
1059
vendor/github.com/coreos/etcd/auth/store.go
generated
vendored
1059
vendor/github.com/coreos/etcd/auth/store.go
generated
vendored
File diff suppressed because it is too large
Load Diff
86
vendor/github.com/coreos/etcd/etcdserver/api/capability.go
generated
vendored
86
vendor/github.com/coreos/etcd/etcdserver/api/capability.go
generated
vendored
@@ -1,86 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/version"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
"github.com/coreos/pkg/capnslog"
|
||||
)
|
||||
|
||||
type Capability string
|
||||
|
||||
const (
|
||||
AuthCapability Capability = "auth"
|
||||
V3rpcCapability Capability = "v3rpc"
|
||||
)
|
||||
|
||||
var (
|
||||
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
|
||||
|
||||
// capabilityMaps is a static map of version to capability map.
|
||||
capabilityMaps = map[string]map[Capability]bool{
|
||||
"3.0.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
"3.1.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
"3.2.0": {AuthCapability: true, V3rpcCapability: true},
|
||||
}
|
||||
|
||||
enableMapMu sync.RWMutex
|
||||
// enabledMap points to a map in capabilityMaps
|
||||
enabledMap map[Capability]bool
|
||||
|
||||
curVersion *semver.Version
|
||||
)
|
||||
|
||||
func init() {
|
||||
enabledMap = map[Capability]bool{
|
||||
AuthCapability: true,
|
||||
V3rpcCapability: true,
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateCapability updates the enabledMap when the cluster version increases.
|
||||
func UpdateCapability(v *semver.Version) {
|
||||
if v == nil {
|
||||
// if recovered but version was never set by cluster
|
||||
return
|
||||
}
|
||||
enableMapMu.Lock()
|
||||
if curVersion != nil && !curVersion.LessThan(*v) {
|
||||
enableMapMu.Unlock()
|
||||
return
|
||||
}
|
||||
curVersion = v
|
||||
enabledMap = capabilityMaps[curVersion.String()]
|
||||
enableMapMu.Unlock()
|
||||
plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
|
||||
}
|
||||
|
||||
func IsCapabilityEnabled(c Capability) bool {
|
||||
enableMapMu.RLock()
|
||||
defer enableMapMu.RUnlock()
|
||||
if enabledMap == nil {
|
||||
return false
|
||||
}
|
||||
return enabledMap[c]
|
||||
}
|
||||
|
||||
func EnableCapability(c Capability) {
|
||||
enableMapMu.Lock()
|
||||
defer enableMapMu.Unlock()
|
||||
enabledMap[c] = true
|
||||
}
|
||||
41
vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
generated
vendored
41
vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
generated
vendored
@@ -1,41 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// Cluster is an interface representing a collection of members in one etcd cluster.
|
||||
type Cluster interface {
|
||||
// ID returns the cluster ID
|
||||
ID() types.ID
|
||||
// ClientURLs returns an aggregate set of all URLs on which this
|
||||
// cluster is listening for client requests
|
||||
ClientURLs() []string
|
||||
// Members returns a slice of members sorted by their ID
|
||||
Members() []*membership.Member
|
||||
// Member retrieves a particular member based on ID, or nil if the
|
||||
// member does not exist in the cluster
|
||||
Member(id types.ID) *membership.Member
|
||||
// IsIDRemoved checks whether the given ID has been removed from this
|
||||
// cluster at some point in the past
|
||||
IsIDRemoved(id types.ID) bool
|
||||
// Version is the cluster-wide minimum major.minor version.
|
||||
Version() *semver.Version
|
||||
}
|
||||
16
vendor/github.com/coreos/etcd/etcdserver/api/doc.go
generated
vendored
16
vendor/github.com/coreos/etcd/etcdserver/api/doc.go
generated
vendored
@@ -1,16 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
|
||||
package api
|
||||
157
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
generated
vendored
157
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
generated
vendored
@@ -1,157 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type AuthServer struct {
|
||||
authenticator etcdserver.Authenticator
|
||||
}
|
||||
|
||||
func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
|
||||
return &AuthServer{authenticator: s}
|
||||
}
|
||||
|
||||
func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
|
||||
resp, err := as.authenticator.AuthEnable(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
|
||||
resp, err := as.authenticator.AuthDisable(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
|
||||
resp, err := as.authenticator.Authenticate(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
|
||||
resp, err := as.authenticator.RoleAdd(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
|
||||
resp, err := as.authenticator.RoleDelete(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
|
||||
resp, err := as.authenticator.RoleGet(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
|
||||
resp, err := as.authenticator.RoleList(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := as.authenticator.RoleRevokePermission(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
|
||||
resp, err := as.authenticator.RoleGrantPermission(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||
resp, err := as.authenticator.UserAdd(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
|
||||
resp, err := as.authenticator.UserDelete(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
|
||||
resp, err := as.authenticator.UserGet(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
|
||||
resp, err := as.authenticator.UserList(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
|
||||
resp, err := as.authenticator.UserGrantRole(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := as.authenticator.UserRevokeRole(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
||||
resp, err := as.authenticator.UserChangePassword(ctx, r)
|
||||
if err != nil {
|
||||
return nil, togRPCError(err)
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
34
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
generated
vendored
34
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
generated
vendored
@@ -1,34 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import "github.com/gogo/protobuf/proto"
|
||||
|
||||
type codec struct{}
|
||||
|
||||
func (c *codec) Marshal(v interface{}) ([]byte, error) {
|
||||
b, err := proto.Marshal(v.(proto.Message))
|
||||
sentBytes.Add(float64(len(b)))
|
||||
return b, err
|
||||
}
|
||||
|
||||
func (c *codec) Unmarshal(data []byte, v interface{}) error {
|
||||
receivedBytes.Add(float64(len(data)))
|
||||
return proto.Unmarshal(data, v.(proto.Message))
|
||||
}
|
||||
|
||||
func (c *codec) String() string {
|
||||
return "proto"
|
||||
}
|
||||
53
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
generated
vendored
53
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
generated
vendored
@@ -1,53 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v3rpc
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"math"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const maxStreams = math.MaxUint32
|
||||
|
||||
func init() {
|
||||
grpclog.SetLogger(plog)
|
||||
}
|
||||
|
||||
func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
|
||||
var opts []grpc.ServerOption
|
||||
opts = append(opts, grpc.CustomCodec(&codec{}))
|
||||
if tls != nil {
|
||||
opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
|
||||
}
|
||||
opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
|
||||
opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
|
||||
opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
|
||||
grpcServer := grpc.NewServer(opts...)
|
||||
|
||||
pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
|
||||
pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
|
||||
pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
|
||||
pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
|
||||
pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
|
||||
pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
|
||||
|
||||
return grpcServer
|
||||
}
|
||||
46
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
generated
vendored
@@ -1,46 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
    "github.com/coreos/etcd/etcdserver"
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

type header struct {
    clusterID int64
    memberID  int64
    raftTimer etcdserver.RaftTimer
    rev       func() int64
}

func newHeader(s *etcdserver.EtcdServer) header {
    return header{
        clusterID: int64(s.Cluster().ID()),
        memberID:  int64(s.ID()),
        raftTimer: s,
        rev:       func() int64 { return s.KV().Rev() },
    }
}

// fill populates pb.ResponseHeader using etcdserver information
func (h *header) fill(rh *pb.ResponseHeader) {
    rh.ClusterId = uint64(h.clusterID)
    rh.MemberId = uint64(h.memberID)
    rh.RaftTerm = h.raftTimer.Term()
    if rh.Revision == 0 {
        rh.Revision = h.rev()
    }
}
144
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
generated
vendored
@@ -1,144 +0,0 @@
[Entire generated, vendored file removed in this diff: the gRPC unary and stream server interceptors that reject requests when the v3rpc capability is disabled or when a require-leader request arrives while the member has no leader, plus monitorLeader, which cancels registered streams after the member goes maxNoLeaderCnt election timeouts without a leader.]
259
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
generated
vendored
@@ -1,259 +0,0 @@
[Entire generated, vendored file removed in this diff: the kvServer implementation of the KV gRPC service (Range, Put, DeleteRange, Txn, Compact) together with its request-validation helpers (checkRangeRequest, checkPutRequest, checkDeleteRequest, checkTxnRequest, checkRequestDupKeys, checkRequestOp) and the MaxOpsPerTxn limit of 128 operations.]
123
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
generated
vendored
@@ -1,123 +0,0 @@
[Entire generated, vendored file removed in this diff: the LeaseServer implementation (LeaseGrant, LeaseRevoke, LeaseTimeToLive, LeaseKeepAlive) that bridges the Lease gRPC service to the etcdserver lessor and fills response headers before sending keepalive renewals.]
190
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
generated
vendored
@@ -1,190 +0,0 @@
[Entire generated, vendored file removed in this diff: the maintenanceServer and its authMaintenanceServer wrapper, implementing Defragment, Snapshot (streamed with a trailing sha256 checksum), Hash, Alarm, and Status, with admin-permission checks via AuthGetter.]
103
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
generated
vendored
@@ -1,103 +0,0 @@
[Entire generated, vendored file removed in this diff: the ClusterServer implementation (MemberAdd, MemberRemove, MemberUpdate, MemberList) and the membersToProtoMembers conversion helper.]
38
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
generated
vendored
@@ -1,38 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import "github.com/prometheus/client_golang/prometheus"

var (
    sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "network",
        Name:      "client_grpc_sent_bytes_total",
        Help:      "The total number of bytes sent to grpc clients.",
    })

    receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "network",
        Name:      "client_grpc_received_bytes_total",
        Help:      "The total number of bytes received from grpc clients.",
    })
)

func init() {
    prometheus.MustRegister(sentBytes)
    prometheus.MustRegister(receivedBytes)
}
89
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
generated
vendored
@@ -1,89 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
    "github.com/coreos/etcd/etcdserver"
    "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/pkg/types"
    "golang.org/x/net/context"
)

type quotaKVServer struct {
    pb.KVServer
    qa quotaAlarmer
}

type quotaAlarmer struct {
    q  etcdserver.Quota
    a  Alarmer
    id types.ID
}

// check whether request satisfies the quota. If there is not enough space,
// ignore request and raise the free space alarm.
func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
    if qa.q.Available(r) {
        return nil
    }
    req := &pb.AlarmRequest{
        MemberID: uint64(qa.id),
        Action:   pb.AlarmRequest_ACTIVATE,
        Alarm:    pb.AlarmType_NOSPACE,
    }
    qa.a.Alarm(ctx, req)
    return rpctypes.ErrGRPCNoSpace
}

func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
    return &quotaKVServer{
        NewKVServer(s),
        quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
    }
}

func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
    if err := s.qa.check(ctx, r); err != nil {
        return nil, err
    }
    return s.KVServer.Put(ctx, r)
}

func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
    if err := s.qa.check(ctx, r); err != nil {
        return nil, err
    }
    return s.KVServer.Txn(ctx, r)
}

type quotaLeaseServer struct {
    pb.LeaseServer
    qa quotaAlarmer
}

func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
    if err := s.qa.check(ctx, cr); err != nil {
        return nil, err
    }
    return s.LeaseServer.LeaseGrant(ctx, cr)
}

func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
    return &quotaLeaseServer{
        NewLeaseServer(s),
        quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
    }
}
103
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
generated
vendored
@@ -1,103 +0,0 @@
[Entire generated, vendored file removed in this diff: togRPCError, the switch that maps etcdserver, mvcc, lease, membership, and auth errors to their rpctypes gRPC equivalents, falling back to grpc.Errorf(codes.Unknown, ...).]
426
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
generated
vendored
@@ -1,426 +0,0 @@
[Entire generated, vendored file removed in this diff: watchServer and serverWatchStream, which bridge the Watch gRPC stream to mvcc.WatchStream, buffer control responses on a dedicated channel, enforce watch permissions, honour progress-notify and prev-kv options, and apply NOPUT/NODELETE filters from the create request.]
878
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
878
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
@@ -1,878 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
warnApplyDuration = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
type applyResult struct {
|
||||
resp proto.Message
|
||||
err error
|
||||
// physc signals the physical effect of the request has completed in addition
|
||||
// to being logically reflected by the node. Currently only used for
|
||||
// Compaction requests.
|
||||
physc <-chan struct{}
|
||||
}
|
||||
|
||||
// applierV3 is the interface for processing V3 raft messages
|
||||
type applierV3 interface {
|
||||
Apply(r *pb.InternalRaftRequest) *applyResult
|
||||
|
||||
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
|
||||
Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
|
||||
DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
|
||||
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
|
||||
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
|
||||
|
||||
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
|
||||
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
|
||||
|
||||
Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
|
||||
|
||||
Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
|
||||
|
||||
AuthEnable() (*pb.AuthEnableResponse, error)
|
||||
AuthDisable() (*pb.AuthDisableResponse, error)
|
||||
|
||||
UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
|
||||
UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
|
||||
UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
|
||||
UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
|
||||
UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
|
||||
UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
|
||||
RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
|
||||
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
|
||||
RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
|
||||
RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
|
||||
RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
|
||||
UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
|
||||
RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
||||
}
|
||||
|
||||
type applierV3backend struct {
|
||||
s *EtcdServer
|
||||
}
|
||||
|
||||
func (s *EtcdServer) newApplierV3() applierV3 {
|
||||
return newAuthApplierV3(
|
||||
s.AuthStore(),
|
||||
newQuotaApplierV3(s, &applierV3backend{s}),
|
||||
)
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
ar := &applyResult{}
|
||||
|
||||
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
|
||||
switch {
|
||||
case r.Range != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
|
||||
case r.Put != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
|
||||
case r.DeleteRange != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
|
||||
case r.Txn != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
|
||||
case r.Compaction != nil:
|
||||
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
|
||||
case r.LeaseGrant != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
|
||||
case r.LeaseRevoke != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
|
||||
case r.Alarm != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
|
||||
case r.Authenticate != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
|
||||
case r.AuthEnable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthEnable()
|
||||
case r.AuthDisable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthDisable()
|
||||
case r.AuthUserAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
|
||||
case r.AuthUserDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
|
||||
case r.AuthUserChangePassword != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
|
||||
case r.AuthUserGrantRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
|
||||
case r.AuthUserGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
|
||||
case r.AuthRoleAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
|
||||
case r.AuthRoleGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
|
||||
case r.AuthRoleDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
|
||||
case r.AuthUserList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
|
||||
case r.AuthRoleList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
return ar
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
|
||||
resp = &pb.PutResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
val, leaseID := p.Value, lease.LeaseID(p.Lease)
|
||||
if txn == nil {
|
||||
if leaseID != lease.NoLease {
|
||||
if l := a.s.lessor.Lookup(leaseID); l == nil {
|
||||
return nil, lease.ErrLeaseNotFound
|
||||
}
|
||||
}
|
||||
txn = a.s.KV().Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
var rr *mvcc.RangeResult
|
||||
if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
|
||||
rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue || p.IgnoreLease {
|
||||
if rr == nil || len(rr.KVs) == 0 {
|
||||
// ignore_{lease,value} flag expects previous key-value pair
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue {
|
||||
val = rr.KVs[0].Value
|
||||
}
|
||||
if p.IgnoreLease {
|
||||
leaseID = lease.LeaseID(rr.KVs[0].Lease)
|
||||
}
|
||||
if p.PrevKv {
|
||||
if rr != nil && len(rr.KVs) != 0 {
|
||||
resp.PrevKv = &rr.KVs[0]
|
||||
}
|
||||
}
|
||||
|
||||
resp.Header.Revision = txn.Put(p.Key, val, leaseID)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
resp := &pb.DeleteRangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(dr.RangeEnd) {
|
||||
dr.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
if dr.PrevKv {
|
||||
rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rr != nil {
|
||||
for i := range rr.KVs {
|
||||
resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
resp := &pb.RangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Read()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(r.RangeEnd) {
|
||||
r.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
limit := r.Limit
|
||||
if r.SortOrder != pb.RangeRequest_NONE ||
|
||||
r.MinModRevision != 0 || r.MaxModRevision != 0 ||
|
||||
r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
|
||||
// fetch everything; sort and truncate afterwards
|
||||
limit = 0
|
||||
}
|
||||
if limit > 0 {
|
||||
// fetch one extra for 'more' flag
|
||||
limit = limit + 1
|
||||
}
|
||||
|
||||
ro := mvcc.RangeOptions{
|
||||
Limit: limit,
|
||||
Rev: r.Revision,
|
||||
Count: r.CountOnly,
|
||||
}
|
||||
|
||||
rr, err := txn.Range(r.Key, r.RangeEnd, ro)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.MaxModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MaxCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
|
||||
sortOrder := r.SortOrder
|
||||
if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
|
||||
// Since current mvcc.Range implementation returns results
|
||||
// sorted by keys in lexicographically ascending order,
|
||||
// sort ASCEND by default only when target is not 'KEY'
|
||||
sortOrder = pb.RangeRequest_ASCEND
|
||||
}
|
||||
if sortOrder != pb.RangeRequest_NONE {
|
||||
var sorter sort.Interface
|
||||
switch {
|
||||
case r.SortTarget == pb.RangeRequest_KEY:
|
||||
sorter = &kvSortByKey{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VERSION:
|
||||
sorter = &kvSortByVersion{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_CREATE:
|
||||
sorter = &kvSortByCreate{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_MOD:
|
||||
sorter = &kvSortByMod{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VALUE:
|
||||
sorter = &kvSortByValue{&kvSort{rr.KVs}}
|
||||
}
|
||||
switch {
|
||||
case sortOrder == pb.RangeRequest_ASCEND:
|
||||
sort.Sort(sorter)
|
||||
case sortOrder == pb.RangeRequest_DESCEND:
|
||||
sort.Sort(sort.Reverse(sorter))
|
||||
}
|
||||
}
|
||||
|
||||
if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
|
||||
rr.KVs = rr.KVs[:r.Limit]
|
||||
resp.More = true
|
||||
}
|
||||
|
||||
resp.Header.Revision = rr.Rev
|
||||
resp.Count = int64(rr.Count)
|
||||
for i := range rr.KVs {
|
||||
if r.KeysOnly {
|
||||
rr.KVs[i].Value = nil
|
||||
}
|
||||
resp.Kvs = append(resp.Kvs, &rr.KVs[i])
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
isWrite := !isTxnReadonly(rt)
|
||||
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
|
||||
|
||||
reqs, ok := a.compareToOps(txn, rt)
|
||||
if isWrite {
|
||||
if err := a.checkRequestPut(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := checkRequestRange(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resps := make([]*pb.ResponseOp, len(reqs))
|
||||
txnResp := &pb.TxnResponse{
|
||||
Responses: resps,
|
||||
Succeeded: ok,
|
||||
Header: &pb.ResponseHeader{},
|
||||
}
|
||||
|
||||
// When executing mutable txn ops, etcd must hold the txn lock so
|
||||
// readers do not see any intermediate results. Since writes are
|
||||
// serialized on the raft loop, the revision in the read view will
|
||||
// be the revision of the write txn.
|
||||
if isWrite {
|
||||
txn.End()
|
||||
txn = a.s.KV().Write()
|
||||
}
|
||||
for i := range reqs {
|
||||
resps[i] = a.applyUnion(txn, reqs[i])
|
||||
}
|
||||
rev := txn.Rev()
|
||||
if len(txn.Changes()) != 0 {
|
||||
rev++
|
||||
}
|
||||
txn.End()
|
||||
|
||||
txnResp.Header.Revision = rev
|
||||
return txnResp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
|
||||
for _, c := range rt.Compare {
|
||||
if !applyCompare(rv, c) {
|
||||
return rt.Failure, false
|
||||
}
|
||||
}
|
||||
return rt.Success, true
|
||||
}
|
||||
|
||||
// applyCompare applies the compare request.
|
||||
// If the comparison succeeds, it returns true. Otherwise, returns false.
|
||||
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
|
||||
rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var ckv mvccpb.KeyValue
|
||||
if len(rr.KVs) != 0 {
|
||||
ckv = rr.KVs[0]
|
||||
} else {
|
||||
// Use the zero value of ckv normally. However...
|
||||
if c.Target == pb.Compare_VALUE {
|
||||
// Always fail if we're comparing a value on a key that doesn't exist.
|
||||
// We can treat non-existence as the empty set explicitly, such that
|
||||
// even a key with a value of length 0 bytes is still a real key
|
||||
// that was written that way
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// -1 is less, 0 is equal, 1 is greater
|
||||
var result int
|
||||
switch c.Target {
|
||||
case pb.Compare_VALUE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Value)
|
||||
if tv != nil {
|
||||
result = bytes.Compare(ckv.Value, tv.Value)
|
||||
}
|
||||
case pb.Compare_CREATE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.CreateRevision, tv.CreateRevision)
|
||||
}
|
||||
|
||||
case pb.Compare_MOD:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_ModRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.ModRevision, tv.ModRevision)
|
||||
}
|
||||
case pb.Compare_VERSION:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Version)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.Version, tv.Version)
|
||||
}
|
||||
}
|
||||
|
||||
switch c.Result {
case pb.Compare_EQUAL:
return result == 0
case pb.Compare_NOT_EQUAL:
return result != 0
case pb.Compare_GREATER:
return result > 0
case pb.Compare_LESS:
return result < 0
}
return true
}
|
||||
|
||||
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
|
||||
switch tv := union.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if tv.RequestRange != nil {
|
||||
resp, err := a.Range(txn, tv.RequestRange)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
|
||||
}
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if tv.RequestPut != nil {
|
||||
resp, err := a.Put(txn, tv.RequestPut)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
|
||||
}
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if tv.RequestDeleteRange != nil {
|
||||
resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
|
||||
}
|
||||
default:
|
||||
// empty union
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
|
||||
resp := &pb.CompactionResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
ch, err := a.s.KV().Compact(compaction.Revision)
|
||||
if err != nil {
|
||||
return nil, ch, err
|
||||
}
|
||||
// get the current revision. which key to get is not important.
|
||||
rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
|
||||
resp.Header.Revision = rr.Rev
|
||||
return resp, ch, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
|
||||
resp := &pb.LeaseGrantResponse{}
|
||||
if err == nil {
|
||||
resp.ID = int64(l.ID)
|
||||
resp.TTL = l.TTL()
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
|
||||
err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
|
||||
return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
|
||||
resp := &pb.AlarmResponse{}
|
||||
oldCount := len(a.s.alarmStore.Get(ar.Alarm))
|
||||
|
||||
switch ar.Action {
|
||||
case pb.AlarmRequest_GET:
|
||||
resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
|
||||
case pb.AlarmRequest_ACTIVATE:
|
||||
m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
|
||||
if m == nil {
|
||||
break
|
||||
}
|
||||
resp.Alarms = append(resp.Alarms, m)
|
||||
activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
|
||||
if !activated {
|
||||
break
|
||||
}
|
||||
|
||||
switch m.Alarm {
|
||||
case pb.AlarmType_NOSPACE:
|
||||
plog.Warningf("alarm raised %+v", m)
|
||||
a.s.applyV3 = newApplierV3Capped(a)
|
||||
default:
|
||||
plog.Errorf("unimplemented alarm activation (%+v)", m)
|
||||
}
|
||||
case pb.AlarmRequest_DEACTIVATE:
|
||||
m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
|
||||
if m == nil {
|
||||
break
|
||||
}
|
||||
resp.Alarms = append(resp.Alarms, m)
|
||||
deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
|
||||
if !deactivated {
|
||||
break
|
||||
}
|
||||
|
||||
switch m.Alarm {
|
||||
case pb.AlarmType_NOSPACE:
|
||||
plog.Infof("alarm disarmed %+v", ar)
|
||||
a.s.applyV3 = a.s.newApplierV3()
|
||||
default:
|
||||
plog.Errorf("unimplemented alarm deactivation (%+v)", m)
|
||||
}
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type applierV3Capped struct {
|
||||
applierV3
|
||||
q backendQuota
|
||||
}
|
||||
|
||||
// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
|
||||
// with Puts so that the number of keys in the store is capped.
|
||||
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
|
||||
|
||||
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
|
||||
func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if a.q.Cost(r) > 0 {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
return a.applierV3.Txn(r)
|
||||
}
|
||||
|
||||
func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
|
||||
func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
|
||||
err := a.s.AuthStore().AuthEnable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
|
||||
a.s.AuthStore().AuthDisable()
|
||||
return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
|
||||
ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
|
||||
resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserAdd(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserDelete(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserChangePassword(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserGrantRole(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserGet(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserRevokeRole(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleAdd(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleGrantPermission(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleGet(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleRevokePermission(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleDelete(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserList(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleList(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type quotaApplierV3 struct {
|
||||
applierV3
|
||||
q Quota
|
||||
}
|
||||
|
||||
func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
return &quotaApplierV3{app, NewBackendQuota(s)}
}
|
||||
|
||||
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
ok := a.q.Available(p)
|
||||
resp, err := a.applierV3.Put(txn, p)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
ok := a.q.Available(rt)
|
||||
resp, err := a.applierV3.Txn(rt)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
ok := a.q.Available(lc)
|
||||
resp, err := a.applierV3.LeaseGrant(lc)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type kvSort struct{ kvs []mvccpb.KeyValue }
|
||||
|
||||
func (s *kvSort) Swap(i, j int) {
|
||||
t := s.kvs[i]
|
||||
s.kvs[i] = s.kvs[j]
|
||||
s.kvs[j] = t
|
||||
}
|
||||
func (s *kvSort) Len() int { return len(s.kvs) }
|
||||
|
||||
type kvSortByKey struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByKey) Less(i, j int) bool {
|
||||
return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
|
||||
}
|
||||
|
||||
type kvSortByVersion struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByVersion) Less(i, j int) bool {
|
||||
return (s.kvs[i].Version - s.kvs[j].Version) < 0
|
||||
}
|
||||
|
||||
type kvSortByCreate struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByCreate) Less(i, j int) bool {
|
||||
return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
|
||||
}
|
||||
|
||||
type kvSortByMod struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByMod) Less(i, j int) bool {
|
||||
return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
|
||||
}
|
||||
|
||||
type kvSortByValue struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByValue) Less(i, j int) bool {
|
||||
return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
|
||||
}
|
||||
|
||||
func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
preq := tv.RequestPut
|
||||
if preq == nil {
|
||||
continue
|
||||
}
|
||||
if preq.IgnoreValue || preq.IgnoreLease {
|
||||
// expects previous key-value, error if not exist
|
||||
rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rr == nil || len(rr.KVs) == 0 {
|
||||
return ErrKeyNotFound
|
||||
}
|
||||
}
|
||||
if lease.LeaseID(preq.Lease) == lease.NoLease {
|
||||
continue
|
||||
}
|
||||
if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
|
||||
return lease.ErrLeaseNotFound
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
greq := tv.RequestRange
|
||||
if greq == nil || greq.Revision == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if greq.Revision > rv.Rev() {
|
||||
return mvcc.ErrFutureRev
|
||||
}
|
||||
if greq.Revision < rv.FirstRev() {
|
||||
return mvcc.ErrCompacted
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareInt64(a, b int64) int {
switch {
case a < b:
return -1
case a > b:
return 1
default:
return 0
}
}

// isGteRange determines if the range end is a >= range. This works around grpc
// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
func isGteRange(rangeEnd []byte) bool {
return len(rangeEnd) == 1 && rangeEnd[0] == 0
}
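In other words, a range end consisting of a single zero byte is the wire encoding for an open-ended ">= key" range, since gRPC turns empty byte strings into nil. A quick standalone check of the three cases (the helper is copied verbatim from above):

package main

import "fmt"

func isGteRange(rangeEnd []byte) bool {
	return len(rangeEnd) == 1 && rangeEnd[0] == 0
}

func main() {
	fmt.Println(isGteRange([]byte{0}))     // true: ">= key", open-ended range
	fmt.Println(isGteRange([]byte("foo"))) // false: ordinary range end
	fmt.Println(isGteRange(nil))           // false: single-key lookup
}
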
func noSideEffect(r *pb.InternalRaftRequest) bool {
return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
}

func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
f := func(ops []*pb.RequestOp) []*pb.RequestOp {
j := 0
for i := 0; i < len(ops); i++ {
if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
continue
}
ops[j] = ops[i]
j++
}

return ops[:j]
}

txn.Success = f(txn.Success)
txn.Failure = f(txn.Failure)
}

func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
j := 0
for i := range rr.KVs {
rr.KVs[j] = rr.KVs[i]
if !isPrunable(&rr.KVs[i]) {
j++
}
}
rr.KVs = rr.KVs[:j]
}
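pruneKVs uses the standard in-place filter idiom: every element is copied down to index j, and j only advances when the element survives, so the slice is truncated without allocating. The same idiom on a plain int slice, as a self-contained illustration (the names here are invented for the example):

package main

import "fmt"

// pruneInts mirrors the structure of pruneKVs above, just on ints.
func pruneInts(xs []int, isPrunable func(int) bool) []int {
	j := 0
	for i := range xs {
		xs[j] = xs[i]
		if !isPrunable(xs[i]) {
			j++
		}
	}
	return xs[:j]
}

func main() {
	xs := []int{1, 2, 3, 4, 5}
	even := func(x int) bool { return x%2 == 0 }
	fmt.Println(pruneInts(xs, even)) // [1 3 5]
}
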
func newHeader(s *EtcdServer) *pb.ResponseHeader {
return &pb.ResponseHeader{
ClusterId: uint64(s.Cluster().ID()),
MemberId: uint64(s.ID()),
Revision: s.KV().Rev(),
RaftTerm: s.Term(),
}
}

196 vendor/github.com/coreos/etcd/etcdserver/apply_auth.go (generated, vendored)
@@ -1,196 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/auth"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
)
|
||||
|
||||
type authApplierV3 struct {
|
||||
applierV3
|
||||
as auth.AuthStore
|
||||
|
||||
// mu serializes Apply so that user isn't corrupted and so that
|
||||
// serialized requests don't leak data from TOCTOU errors
|
||||
mu sync.Mutex
|
||||
|
||||
authInfo auth.AuthInfo
|
||||
}
|
||||
|
||||
func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
|
||||
return &authApplierV3{applierV3: base, as: as}
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
aa.mu.Lock()
|
||||
defer aa.mu.Unlock()
|
||||
if r.Header != nil {
|
||||
// backward-compatible with pre-3.0 releases when internalRaftRequest
|
||||
// does not have header field
|
||||
aa.authInfo.Username = r.Header.Username
|
||||
aa.authInfo.Revision = r.Header.AuthRevision
|
||||
}
|
||||
if needAdminPermission(r) {
|
||||
if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return &applyResult{err: err}
|
||||
}
|
||||
}
|
||||
ret := aa.applierV3.Apply(r)
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return ret
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return aa.applierV3.Put(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Range(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return aa.applierV3.DeleteRange(txn, r)
|
||||
}
|
||||
|
||||
func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
switch tv := requ.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if tv.RequestRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if tv.RequestPut == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if tv.RequestDeleteRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if tv.RequestDeleteRange.PrevKv {
|
||||
err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
|
||||
for _, c := range rt.Compare {
|
||||
if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Txn(rt)
|
||||
}
|
||||
|
||||
func needAdminPermission(r *pb.InternalRaftRequest) bool {
|
||||
switch {
|
||||
case r.AuthEnable != nil:
|
||||
return true
|
||||
case r.AuthDisable != nil:
|
||||
return true
|
||||
case r.AuthUserAdd != nil:
|
||||
return true
|
||||
case r.AuthUserDelete != nil:
|
||||
return true
|
||||
case r.AuthUserChangePassword != nil:
|
||||
return true
|
||||
case r.AuthUserGrantRole != nil:
|
||||
return true
|
||||
case r.AuthUserGet != nil:
|
||||
return true
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
return true
|
||||
case r.AuthRoleAdd != nil:
|
||||
return true
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
return true
|
||||
case r.AuthRoleGet != nil:
|
||||
return true
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
return true
|
||||
case r.AuthRoleDelete != nil:
|
||||
return true
|
||||
case r.AuthUserList != nil:
|
||||
return true
|
||||
case r.AuthRoleList != nil:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
140 vendor/github.com/coreos/etcd/etcdserver/apply_v2.go (generated, vendored)
@@ -1,140 +0,0 @@
|
||||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/pbutil"
|
||||
"github.com/coreos/etcd/store"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// ApplierV2 is the interface for processing V2 raft messages
|
||||
type ApplierV2 interface {
|
||||
Delete(r *pb.Request) Response
|
||||
Post(r *pb.Request) Response
|
||||
Put(r *pb.Request) Response
|
||||
QGet(r *pb.Request) Response
|
||||
Sync(r *pb.Request) Response
|
||||
}
|
||||
|
||||
func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
|
||||
return &applierV2store{store: s, cluster: c}
|
||||
}
|
||||
|
||||
type applierV2store struct {
|
||||
store store.Store
|
||||
cluster *membership.RaftCluster
|
||||
}
|
||||
|
||||
func (a *applierV2store) Delete(r *pb.Request) Response {
|
||||
switch {
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
|
||||
default:
|
||||
return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) Post(r *pb.Request) Response {
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Put(r *pb.Request) Response {
|
||||
ttlOptions := toTTLOptions(r)
|
||||
exists, existsSet := pbutil.GetBool(r.PrevExist)
|
||||
switch {
|
||||
case existsSet:
|
||||
if exists {
|
||||
if r.PrevIndex == 0 && r.PrevValue == "" {
|
||||
return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
default:
|
||||
if storeMemberAttributeRegexp.MatchString(r.Path) {
|
||||
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
|
||||
var attr membership.Attributes
|
||||
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
|
||||
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
|
||||
}
|
||||
if a.cluster != nil {
|
||||
a.cluster.UpdateAttributes(id, attr)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
if r.Path == membership.StoreClusterVersionKey() {
|
||||
if a.cluster != nil {
|
||||
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) QGet(r *pb.Request) Response {
|
||||
return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Sync(r *pb.Request) Response {
|
||||
a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
|
||||
return Response{}
|
||||
}
|
||||
|
||||
// applyV2Request interprets r as a call to store.X and returns a Response interpreted
|
||||
// from store.Event
|
||||
func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
|
||||
toTTLOptions(r)
|
||||
switch r.Method {
|
||||
case "POST":
|
||||
return s.applyV2.Post(r)
|
||||
case "PUT":
|
||||
return s.applyV2.Put(r)
|
||||
case "DELETE":
|
||||
return s.applyV2.Delete(r)
|
||||
case "QGET":
|
||||
return s.applyV2.QGet(r)
|
||||
case "SYNC":
|
||||
return s.applyV2.Sync(r)
|
||||
default:
|
||||
// This should never be reached, but just in case:
|
||||
return Response{err: ErrUnknownMethod}
|
||||
}
|
||||
}
|
||||
|
||||
func toTTLOptions(r *pb.Request) store.TTLOptionSet {
|
||||
refresh, _ := pbutil.GetBool(r.Refresh)
|
||||
ttlOptions := store.TTLOptionSet{Refresh: refresh}
|
||||
if r.Expiration != 0 {
|
||||
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
|
||||
}
|
||||
return ttlOptions
|
||||
}
|
||||
|
||||
func toResponse(ev *store.Event, err error) Response {
|
||||
return Response{Event: ev, err: err}
|
||||
}
|
||||
81 vendor/github.com/coreos/etcd/etcdserver/backend.go (generated, vendored)
@@ -1,81 +0,0 @@
|
||||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/raft/raftpb"
|
||||
"github.com/coreos/etcd/snap"
|
||||
)
|
||||
|
||||
func newBackend(cfg *ServerConfig) backend.Backend {
|
||||
bcfg := backend.DefaultBackendConfig()
|
||||
bcfg.Path = cfg.backendPath()
|
||||
if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
|
||||
// permit 10% excess over quota for disarm
|
||||
bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
|
||||
}
|
||||
return backend.New(bcfg)
|
||||
}
|
||||
|
||||
// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
|
||||
func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("database snapshot file path error: %v", err)
|
||||
}
|
||||
if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
|
||||
return nil, fmt.Errorf("rename snapshot file error: %v", err)
|
||||
}
|
||||
return openBackend(cfg), nil
|
||||
}
|
||||
|
||||
// openBackend returns a backend using the current etcd db.
|
||||
func openBackend(cfg *ServerConfig) backend.Backend {
|
||||
fn := cfg.backendPath()
|
||||
beOpened := make(chan backend.Backend)
|
||||
go func() {
|
||||
beOpened <- newBackend(cfg)
|
||||
}()
|
||||
select {
|
||||
case be := <-beOpened:
|
||||
return be
|
||||
case <-time.After(time.Second):
|
||||
plog.Warningf("another etcd process is using %q and holds the file lock.", fn)
|
||||
plog.Warningf("waiting for it to exit before starting...")
|
||||
}
|
||||
return <-beOpened
|
||||
}
|
||||
|
||||
// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
|
||||
// before updating the backend db after persisting raft snapshot to disk,
|
||||
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
|
||||
// case, replace the db with the snapshot db sent by the leader.
|
||||
func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
var cIndex consistentIndex
|
||||
kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
|
||||
defer kv.Close()
|
||||
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
|
||||
return oldbe, nil
|
||||
}
|
||||
oldbe.Close()
|
||||
return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
|
||||
}
|
||||
258 vendor/github.com/coreos/etcd/etcdserver/cluster_util.go (generated, vendored)
@@ -1,258 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/etcd/version"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// isMemberBootstrapped tries to check if the given member has been bootstrapped
|
||||
// in the given cluster.
|
||||
func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
|
||||
rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
id := cl.MemberByName(member).ID
|
||||
m := rcl.Member(id)
|
||||
if m == nil {
|
||||
return false
|
||||
}
|
||||
if len(m.ClientURLs) > 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
|
||||
// attempts to construct a Cluster by accessing the members endpoint on one of
|
||||
// these URLs. The first URL to provide a response is used. If no URLs provide
|
||||
// a response, or a Cluster cannot be successfully created from a received
|
||||
// response, an error is returned.
|
||||
// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
|
||||
// 10 seconds is enough for building a connection and finishing the request.
|
||||
func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
|
||||
return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
|
||||
}
|
||||
|
||||
// If logerr is true, it prints out more error messages.
|
||||
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
|
||||
cc := &http.Client{
|
||||
Transport: rt,
|
||||
Timeout: timeout,
|
||||
}
|
||||
for _, u := range urls {
|
||||
resp, err := cc.Get(u + "/members")
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not get cluster response from %s: %v", u, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not read the body of cluster response: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
var membs []*membership.Member
|
||||
if err = json.Unmarshal(b, &membs); err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not unmarshal cluster response: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
|
||||
if err != nil {
|
||||
if logerr {
|
||||
plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// check the length of membership members
|
||||
// if the membership members are present then prepare and return raft cluster
|
||||
// if membership members are not present then the raft cluster formed will be
|
||||
// an invalid empty cluster hence return failed to get raft cluster member(s) from the given urls error
|
||||
if len(membs) > 0 {
|
||||
return membership.NewClusterFromMembers("", id, membs), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
|
||||
}
|
||||
return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
|
||||
}
|
||||
|
||||
// getRemotePeerURLs returns peer urls of remote members in the cluster. The
|
||||
// returned list is sorted in ascending lexicographical order.
|
||||
func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
|
||||
us := make([]string, 0)
|
||||
for _, m := range cl.Members() {
|
||||
if m.Name == local {
|
||||
continue
|
||||
}
|
||||
us = append(us, m.PeerURLs...)
|
||||
}
|
||||
sort.Strings(us)
|
||||
return us
|
||||
}
|
||||
|
||||
// getVersions returns the versions of the members in the given cluster.
|
||||
// The key of the returned map is the member's ID. The value of the returned map
|
||||
// is the semver versions string, including server and cluster.
|
||||
// If it fails to get the version of a member, the key will be nil.
|
||||
func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
|
||||
members := cl.Members()
|
||||
vers := make(map[string]*version.Versions)
|
||||
for _, m := range members {
|
||||
if m.ID == local {
|
||||
cv := "not_decided"
|
||||
if cl.Version() != nil {
|
||||
cv = cl.Version().String()
|
||||
}
|
||||
vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
|
||||
continue
|
||||
}
|
||||
ver, err := getVersion(m, rt)
|
||||
if err != nil {
|
||||
plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
|
||||
vers[m.ID.String()] = nil
|
||||
} else {
|
||||
vers[m.ID.String()] = ver
|
||||
}
|
||||
}
|
||||
return vers
|
||||
}
|
||||
|
||||
// decideClusterVersion decides the cluster version based on the versions map.
|
||||
// The returned version is the min server version in the map, or nil if the min
|
||||
// version is unknown.
|
||||
func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
|
||||
var cv *semver.Version
|
||||
lv := semver.Must(semver.NewVersion(version.Version))
|
||||
|
||||
for mid, ver := range vers {
|
||||
if ver == nil {
|
||||
return nil
|
||||
}
|
||||
v, err := semver.NewVersion(ver.Server)
|
||||
if err != nil {
|
||||
plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
|
||||
return nil
|
||||
}
|
||||
if lv.LessThan(*v) {
|
||||
plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
|
||||
plog.Warningf("member %s has a higher version %s", mid, ver.Server)
|
||||
}
|
||||
if cv == nil {
|
||||
cv = v
|
||||
} else if v.LessThan(*cv) {
|
||||
cv = v
|
||||
}
|
||||
}
|
||||
return cv
|
||||
}
|
||||
|
||||
// isCompatibleWithCluster return true if the local member has a compatible version with
|
||||
// the current running cluster.
|
||||
// The version is considered as compatible when at least one of the other members in the cluster has a
|
||||
// cluster version in the range of [MinClusterVersion, Version] and no known members has a cluster version
|
||||
// out of the range.
|
||||
// We set this rule since when the local member joins, another member might be offline.
|
||||
func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
|
||||
vers := getVersions(cl, local, rt)
|
||||
minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
|
||||
maxV := semver.Must(semver.NewVersion(version.Version))
|
||||
maxV = &semver.Version{
|
||||
Major: maxV.Major,
|
||||
Minor: maxV.Minor,
|
||||
}
|
||||
|
||||
return isCompatibleWithVers(vers, local, minV, maxV)
|
||||
}
|
||||
|
||||
func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
|
||||
var ok bool
|
||||
for id, v := range vers {
|
||||
// ignore comparison with local version
|
||||
if id == local.String() {
|
||||
continue
|
||||
}
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
clusterv, err := semver.NewVersion(v.Cluster)
|
||||
if err != nil {
|
||||
plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
|
||||
continue
|
||||
}
|
||||
if clusterv.LessThan(*minV) {
|
||||
plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
|
||||
return false
|
||||
}
|
||||
if maxV.LessThan(*clusterv) {
|
||||
plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
|
||||
return false
|
||||
}
|
||||
ok = true
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// getVersion returns the Versions of the given member via its
|
||||
// peerURLs. Returns the last error if it fails to get the version.
|
||||
func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
|
||||
cc := &http.Client{
|
||||
Transport: rt,
|
||||
}
|
||||
var (
|
||||
err error
|
||||
resp *http.Response
|
||||
)
|
||||
|
||||
for _, u := range m.PeerURLs {
|
||||
resp, err = cc.Get(u + "/version")
|
||||
if err != nil {
|
||||
plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
var b []byte
|
||||
b, err = ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
if err != nil {
|
||||
plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
var vers version.Versions
|
||||
if err = json.Unmarshal(b, &vers); err != nil {
|
||||
plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
|
||||
continue
|
||||
}
|
||||
return &vers, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
204 vendor/github.com/coreos/etcd/etcdserver/config.go (generated, vendored)
@@ -1,204 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coreos/etcd/pkg/netutil"
|
||||
"github.com/coreos/etcd/pkg/transport"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
)
|
||||
|
||||
// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
|
||||
type ServerConfig struct {
|
||||
Name string
|
||||
DiscoveryURL string
|
||||
DiscoveryProxy string
|
||||
ClientURLs types.URLs
|
||||
PeerURLs types.URLs
|
||||
DataDir string
|
||||
// DedicatedWALDir config will make the etcd to write the WAL to the WALDir
|
||||
// rather than the dataDir/member/wal.
|
||||
DedicatedWALDir string
|
||||
SnapCount uint64
|
||||
MaxSnapFiles uint
|
||||
MaxWALFiles uint
|
||||
InitialPeerURLsMap types.URLsMap
|
||||
InitialClusterToken string
|
||||
NewCluster bool
|
||||
ForceNewCluster bool
|
||||
PeerTLSInfo transport.TLSInfo
|
||||
|
||||
TickMs uint
|
||||
ElectionTicks int
|
||||
BootstrapTimeout time.Duration
|
||||
|
||||
AutoCompactionRetention int
|
||||
QuotaBackendBytes int64
|
||||
|
||||
StrictReconfigCheck bool
|
||||
|
||||
// ClientCertAuthEnabled is true when cert has been signed by the client CA.
|
||||
ClientCertAuthEnabled bool
|
||||
|
||||
AuthToken string
|
||||
}
|
||||
|
||||
// VerifyBootstrap sanity-checks the initial config for bootstrap case
|
||||
// and returns an error for things that should never happen.
|
||||
func (c *ServerConfig) VerifyBootstrap() error {
|
||||
if err := c.hasLocalMember(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.advertiseMatchesCluster(); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkDuplicateURL(c.InitialPeerURLsMap) {
|
||||
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
|
||||
}
|
||||
if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
|
||||
return fmt.Errorf("initial cluster unset and no discovery URL found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// VerifyJoinExisting sanity-checks the initial config for join existing cluster
|
||||
// case and returns an error for things that should never happen.
|
||||
func (c *ServerConfig) VerifyJoinExisting() error {
|
||||
// The member has announced its peer urls to the cluster before starting; no need to
|
||||
// set the configuration again.
|
||||
if err := c.hasLocalMember(); err != nil {
|
||||
return err
|
||||
}
|
||||
if checkDuplicateURL(c.InitialPeerURLsMap) {
|
||||
return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
|
||||
}
|
||||
if c.DiscoveryURL != "" {
|
||||
return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// hasLocalMember checks that the cluster at least contains the local server.
|
||||
func (c *ServerConfig) hasLocalMember() error {
|
||||
if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
|
||||
return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
|
||||
func (c *ServerConfig) advertiseMatchesCluster() error {
|
||||
urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
|
||||
urls.Sort()
|
||||
sort.Strings(apurls)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
|
||||
defer cancel()
|
||||
if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
|
||||
umap := map[string]types.URLs{c.Name: c.PeerURLs}
|
||||
return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
|
||||
|
||||
func (c *ServerConfig) WALDir() string {
|
||||
if c.DedicatedWALDir != "" {
|
||||
return c.DedicatedWALDir
|
||||
}
|
||||
return filepath.Join(c.MemberDir(), "wal")
|
||||
}
|
||||
|
||||
func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
|
||||
|
||||
func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
|
||||
|
||||
// ReqTimeout returns timeout for request to finish.
|
||||
func (c *ServerConfig) ReqTimeout() time.Duration {
|
||||
// 5s for queue waiting, computation and disk IO delay
|
||||
// + 2 * election timeout for possible leader election
|
||||
return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond
|
||||
}
|
||||
|
||||
func (c *ServerConfig) electionTimeout() time.Duration {
|
||||
return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond
|
||||
}
|
||||
|
||||
func (c *ServerConfig) peerDialTimeout() time.Duration {
|
||||
// 1s for queue wait and system delay
|
||||
// + one RTT, which is smaller than 1/5 election timeout
|
||||
return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5
|
||||
}
|
||||
|
||||
func (c *ServerConfig) PrintWithInitial() { c.print(true) }
|
||||
|
||||
func (c *ServerConfig) Print() { c.print(false) }
|
||||
|
||||
func (c *ServerConfig) print(initial bool) {
|
||||
plog.Infof("name = %s", c.Name)
|
||||
if c.ForceNewCluster {
|
||||
plog.Infof("force new cluster")
|
||||
}
|
||||
plog.Infof("data dir = %s", c.DataDir)
|
||||
plog.Infof("member dir = %s", c.MemberDir())
|
||||
if c.DedicatedWALDir != "" {
|
||||
plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
|
||||
}
|
||||
plog.Infof("heartbeat = %dms", c.TickMs)
|
||||
plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
|
||||
plog.Infof("snapshot count = %d", c.SnapCount)
|
||||
if len(c.DiscoveryURL) != 0 {
|
||||
plog.Infof("discovery URL= %s", c.DiscoveryURL)
|
||||
if len(c.DiscoveryProxy) != 0 {
|
||||
plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
|
||||
}
|
||||
}
|
||||
plog.Infof("advertise client URLs = %s", c.ClientURLs)
|
||||
if initial {
|
||||
plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
|
||||
plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
|
||||
}
|
||||
}
|
||||
|
||||
func checkDuplicateURL(urlsmap types.URLsMap) bool {
|
||||
um := make(map[string]bool)
|
||||
for _, urls := range urlsmap {
|
||||
for _, url := range urls {
|
||||
u := url.String()
|
||||
if um[u] {
|
||||
return true
|
||||
}
|
||||
um[u] = true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *ServerConfig) bootstrapTimeout() time.Duration {
|
||||
if c.BootstrapTimeout != 0 {
|
||||
return c.BootstrapTimeout
|
||||
}
|
||||
return time.Second
|
||||
}
|
||||
|
||||
func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
|
||||
33 vendor/github.com/coreos/etcd/etcdserver/consistent_index.go (generated, vendored)
@@ -1,33 +0,0 @@
|
||||
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// consistentIndex represents the offset of an entry in a consistent replica log.
|
||||
// It implements the mvcc.ConsistentIndexGetter interface.
|
||||
// It is always set to the offset of current entry before executing the entry,
|
||||
// so ConsistentWatchableKV could get the consistent index from it.
|
||||
type consistentIndex uint64
|
||||
|
||||
func (i *consistentIndex) setConsistentIndex(v uint64) {
|
||||
atomic.StoreUint64((*uint64)(i), v)
|
||||
}
|
||||
|
||||
func (i *consistentIndex) ConsistentIndex() uint64 {
|
||||
return atomic.LoadUint64((*uint64)(i))
|
||||
}
|
||||
16 vendor/github.com/coreos/etcd/etcdserver/doc.go (generated, vendored)
@@ -1,16 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package etcdserver defines how etcd servers interact and store their states.
package etcdserver
46 vendor/github.com/coreos/etcd/etcdserver/errors.go generated vendored
@@ -1,46 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"errors"
	"fmt"
)

var (
	ErrUnknownMethod = errors.New("etcdserver: unknown method")
	ErrStopped = errors.New("etcdserver: server stopped")
	ErrCanceled = errors.New("etcdserver: request cancelled")
	ErrTimeout = errors.New("etcdserver: request timed out")
	ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
	ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
	ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
	ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
	ErrNoLeader = errors.New("etcdserver: no leader")
	ErrRequestTooLarge = errors.New("etcdserver: request is too large")
	ErrNoSpace = errors.New("etcdserver: no space")
	ErrTooManyRequests = errors.New("etcdserver: too many requests")
	ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
	ErrKeyNotFound = errors.New("etcdserver: key not found")
)

type DiscoveryError struct {
	Op string
	Err error
}

func (e DiscoveryError) Error() string {
	return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
}
102 vendor/github.com/coreos/etcd/etcdserver/metrics.go generated vendored
@@ -1,102 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"time"

	"github.com/coreos/etcd/pkg/runtime"
	"github.com/prometheus/client_golang/prometheus"
)

var (
	hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "has_leader",
		Help: "Whether or not a leader exists. 1 is existence, 0 is not.",
	})
	leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "leader_changes_seen_total",
		Help: "The number of leader changes seen.",
	})
	proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "proposals_committed_total",
		Help: "The total number of consensus proposals committed.",
	})
	proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "proposals_applied_total",
		Help: "The total number of consensus proposals applied.",
	})
	proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "proposals_pending",
		Help: "The current number of pending proposals to commit.",
	})
	proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "server",
		Name: "proposals_failed_total",
		Help: "The total number of failed proposals seen.",
	})
	leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd_debugging",
		Subsystem: "server",
		Name: "lease_expired_total",
		Help: "The total number of expired leases.",
	})
)

func init() {
	prometheus.MustRegister(hasLeader)
	prometheus.MustRegister(leaderChanges)
	prometheus.MustRegister(proposalsCommitted)
	prometheus.MustRegister(proposalsApplied)
	prometheus.MustRegister(proposalsPending)
	prometheus.MustRegister(proposalsFailed)
	prometheus.MustRegister(leaseExpired)
}

func monitorFileDescriptor(done <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		used, err := runtime.FDUsage()
		if err != nil {
			plog.Errorf("cannot monitor file descriptor usage (%v)", err)
			return
		}
		limit, err := runtime.FDLimit()
		if err != nil {
			plog.Errorf("cannot monitor file descriptor usage (%v)", err)
			return
		}
		if used >= limit/5*4 {
			plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
		}
		select {
		case <-ticker.C:
		case <-done:
			return
		}
	}
}
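The metrics file above follows the usual client_golang pattern: build a collector with `prometheus.NewGauge`/`NewCounter` and register it once in `init`. A minimal, self-contained sketch of that pattern; the metric name and package layout here are made up for illustration and are not taken from etcd:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// exampleUp is a hypothetical gauge, constructed the same way as hasLeader above.
var exampleUp = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "example",
	Subsystem: "server",
	Name:      "up",
	Help:      "Whether the example server considers itself up.",
})

func init() {
	// Register exactly once; MustRegister panics on duplicate registration.
	prometheus.MustRegister(exampleUp)
}

func main() {
	exampleUp.Set(1)
	fmt.Println("gauge registered and set")
}
```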
121 vendor/github.com/coreos/etcd/etcdserver/quota.go generated vendored
@@ -1,121 +0,0 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

const (
	// DefaultQuotaBytes is the number of bytes the backend Size may
	// consume before exceeding the space quota.
	DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
	// MaxQuotaBytes is the maximum number of bytes suggested for a backend
	// quota. A larger quota may lead to degraded performance.
	MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
)

// Quota represents an arbitrary quota against arbitrary requests. Each request
// costs some charge; if there is not enough remaining charge, then there are
// too few resources available within the quota to apply the request.
type Quota interface {
	// Available judges whether the given request fits within the quota.
	Available(req interface{}) bool
	// Cost computes the charge against the quota for a given request.
	Cost(req interface{}) int
	// Remaining is the amount of charge left for the quota.
	Remaining() int64
}

type passthroughQuota struct{}

func (*passthroughQuota) Available(interface{}) bool { return true }
func (*passthroughQuota) Cost(interface{}) int { return 0 }
func (*passthroughQuota) Remaining() int64 { return 1 }

type backendQuota struct {
	s *EtcdServer
	maxBackendBytes int64
}

const (
	// leaseOverhead is an estimate for the cost of storing a lease
	leaseOverhead = 64
	// kvOverhead is an estimate for the cost of storing a key's metadata
	kvOverhead = 256
)

func NewBackendQuota(s *EtcdServer) Quota {
	if s.Cfg.QuotaBackendBytes < 0 {
		// disable quotas if negative
		plog.Warningf("disabling backend quota")
		return &passthroughQuota{}
	}
	if s.Cfg.QuotaBackendBytes == 0 {
		// use default size if no quota size given
		return &backendQuota{s, DefaultQuotaBytes}
	}
	if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
		plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
	}
	return &backendQuota{s, s.Cfg.QuotaBackendBytes}
}

func (b *backendQuota) Available(v interface{}) bool {
	// TODO: maybe optimize backend.Size()
	return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
}

func (b *backendQuota) Cost(v interface{}) int {
	switch r := v.(type) {
	case *pb.PutRequest:
		return costPut(r)
	case *pb.TxnRequest:
		return costTxn(r)
	case *pb.LeaseGrantRequest:
		return leaseOverhead
	default:
		panic("unexpected cost")
	}
}

func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }

func costTxnReq(u *pb.RequestOp) int {
	r := u.GetRequestPut()
	if r == nil {
		return 0
	}
	return costPut(r)
}

func costTxn(r *pb.TxnRequest) int {
	sizeSuccess := 0
	for _, u := range r.Success {
		sizeSuccess += costTxnReq(u)
	}
	sizeFailure := 0
	for _, u := range r.Failure {
		sizeFailure += costTxnReq(u)
	}
	if sizeFailure > sizeSuccess {
		return sizeFailure
	}
	return sizeSuccess
}

func (b *backendQuota) Remaining() int64 {
	return b.maxBackendBytes - b.s.Backend().Size()
}
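The quota code above charges a transaction the larger of its two branches, since only one branch will execute but the quota has to cover either outcome. A rough standalone sketch of that accounting rule, using made-up put/txn structs rather than the etcd protobuf types:

```go
package main

import "fmt"

// put and txn are stand-ins for pb.PutRequest / pb.TxnRequest, for illustration only.
type put struct{ key, value string }

type txn struct{ success, failure []put }

const kvOverhead = 256 // same rough per-key metadata estimate used above

func costPut(p put) int { return kvOverhead + len(p.key) + len(p.value) }

// costTxn charges the more expensive branch, because only one branch runs
// but the quota must hold whichever one it is.
func costTxn(t txn) int {
	s, f := 0, 0
	for _, p := range t.success {
		s += costPut(p)
	}
	for _, p := range t.failure {
		f += costPut(p)
	}
	if f > s {
		return f
	}
	return s
}

func main() {
	t := txn{
		success: []put{{"k", "small"}},
		failure: []put{{"k", "a-much-longer-fallback-value"}},
	}
	fmt.Println(costTxn(t)) // cost of the failure branch, the larger one
}
```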
594 vendor/github.com/coreos/etcd/etcdserver/raft.go generated vendored
@@ -1,594 +0,0 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"encoding/json"
	"expvar"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/contention"
	"github.com/coreos/etcd/pkg/pbutil"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/rafthttp"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
	"github.com/coreos/pkg/capnslog"
)

const (
	// Number of entries for a slow follower to catch up after compacting
	// the raft storage entries.
	// We expect the follower to have millisecond-level latency with the leader.
	// The max throughput is around 10K. Keeping 5K entries is enough for helping
	// the follower to catch up.
	numberOfCatchUpEntries = 5000

	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
	// Assuming the RTT is around 10ms, 1MB max size is large enough.
	maxSizePerMsg = 1 * 1024 * 1024
	// Never overflow the rafthttp buffer, which is 4096.
	// TODO: a better const?
	maxInflightMsgs = 4096 / 8
)

var (
	// protects raftStatus
	raftStatusMu sync.Mutex
	// indirection for the expvar func interface;
	// expvar panics when publishing a duplicate name and
	// does not support removing a registered name,
	// so we only register a func that calls raftStatus
	// and change raftStatus as needed.
	raftStatus func() raft.Status
)

func init() {
	raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
	expvar.Publish("raft.status", expvar.Func(func() interface{} {
		raftStatusMu.Lock()
		defer raftStatusMu.Unlock()
		return raftStatus()
	}))
}

type RaftTimer interface {
	Index() uint64
	Term() uint64
}

// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
	entries []raftpb.Entry
	snapshot raftpb.Snapshot
	// notifyc synchronizes etcd server applies with the raft node
	notifyc chan struct{}
}

type raftNode struct {
	// Cache of the latest raft index and raft term the server has seen.
	// These three uint64 fields must be the first elements to keep 64-bit
	// alignment for atomic access to the fields.
	index uint64
	term uint64
	lead uint64

	raftNodeConfig

	// a chan to send/receive snapshot
	msgSnapC chan raftpb.Message

	// a chan to send out apply
	applyc chan apply

	// a chan to send out readState
	readStateC chan raft.ReadState

	// utility
	ticker *time.Ticker
	// contention detectors for raft heartbeat message
	td *contention.TimeoutDetector

	stopped chan struct{}
	done chan struct{}
}

type raftNodeConfig struct {
	// to check if msg receiver is removed from cluster
	isIDRemoved func(id uint64) bool
	raft.Node
	raftStorage *raft.MemoryStorage
	storage Storage
	heartbeat time.Duration // for logging
	// transport specifies the transport to send and receive msgs to members.
	// Sending messages MUST NOT block. It is okay to drop messages, since
	// clients should timeout and reissue their messages.
	// If transport is nil, server will panic.
	transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
	r := &raftNode{
		raftNodeConfig: cfg,
		// set up contention detectors for raft heartbeat message.
		// expect to send a heartbeat within 2 heartbeat intervals.
		td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
		readStateC: make(chan raft.ReadState, 1),
		msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
		applyc: make(chan apply),
		stopped: make(chan struct{}),
		done: make(chan struct{}),
	}
	if r.heartbeat == 0 {
		r.ticker = &time.Ticker{}
	} else {
		r.ticker = time.NewTicker(r.heartbeat)
	}
	return r
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
	internalTimeout := time.Second

	go func() {
		defer r.onStop()
		islead := false

		for {
			select {
			case <-r.ticker.C:
				r.Tick()
			case rd := <-r.Ready():
				if rd.SoftState != nil {
					newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
					if newLeader {
						leaderChanges.Inc()
					}

					if rd.SoftState.Lead == raft.None {
						hasLeader.Set(0)
					} else {
						hasLeader.Set(1)
					}

					atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
					islead = rd.RaftState == raft.StateLeader
					rh.updateLeadership(newLeader)
					r.td.Reset()
				}

				if len(rd.ReadStates) != 0 {
					select {
					case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
					case <-time.After(internalTimeout):
						plog.Warningf("timed out sending read state")
					case <-r.stopped:
						return
					}
				}

				notifyc := make(chan struct{}, 1)
				ap := apply{
					entries: rd.CommittedEntries,
					snapshot: rd.Snapshot,
					notifyc: notifyc,
				}

				updateCommittedIndex(&ap, rh)

				select {
				case r.applyc <- ap:
				case <-r.stopped:
					return
				}

				// the leader can write to its disk in parallel with replicating to the
				// followers and the followers writing to their disks.
				// For more details, check raft thesis 10.2.1
				if islead {
					// gofail: var raftBeforeLeaderSend struct{}
					r.transport.Send(r.processMessages(rd.Messages))
				}

				// gofail: var raftBeforeSave struct{}
				if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
					plog.Fatalf("raft save state and entries error: %v", err)
				}
				if !raft.IsEmptyHardState(rd.HardState) {
					proposalsCommitted.Set(float64(rd.HardState.Commit))
				}
				// gofail: var raftAfterSave struct{}

				if !raft.IsEmptySnap(rd.Snapshot) {
					// gofail: var raftBeforeSaveSnap struct{}
					if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
						plog.Fatalf("raft save snapshot error: %v", err)
					}
					// etcdserver now claims the snapshot has been persisted onto the disk
					notifyc <- struct{}{}

					// gofail: var raftAfterSaveSnap struct{}
					r.raftStorage.ApplySnapshot(rd.Snapshot)
					plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
					// gofail: var raftAfterApplySnap struct{}
				}

				r.raftStorage.Append(rd.Entries)

				if !islead {
					// finish processing incoming messages before we signal raftdone chan
					msgs := r.processMessages(rd.Messages)

					// now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
					notifyc <- struct{}{}

					// Candidate or follower needs to wait for all pending configuration
					// changes to be applied before sending messages.
					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
					// Also slow machine's follower raft-layer could proceed to become the leader
					// on its own single-node cluster, before apply-layer applies the config change.
					// We simply wait for ALL pending entries to be applied for now.
					// We might improve this later on if it causes unnecessary long blocking issues.
					waitApply := false
					for _, ent := range rd.CommittedEntries {
						if ent.Type == raftpb.EntryConfChange {
							waitApply = true
							break
						}
					}
					if waitApply {
						// blocks until 'applyAll' calls 'applyWait.Trigger'
						// to be in sync with scheduled config-change job
						// (assume notifyc has cap of 1)
						select {
						case notifyc <- struct{}{}:
						case <-r.stopped:
							return
						}
					}

					// gofail: var raftBeforeFollowerSend struct{}
					r.transport.Send(msgs)
				} else {
					// leader already processed 'MsgSnap' and signaled
					notifyc <- struct{}{}
				}

				r.Advance()
			case <-r.stopped:
				return
			}
		}
	}()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
	var ci uint64
	if len(ap.entries) != 0 {
		ci = ap.entries[len(ap.entries)-1].Index
	}
	if ap.snapshot.Metadata.Index > ci {
		ci = ap.snapshot.Metadata.Index
	}
	if ci != 0 {
		rh.updateCommittedIndex(ci)
	}
}

func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
	sentAppResp := false
	for i := len(ms) - 1; i >= 0; i-- {
		if r.isIDRemoved(ms[i].To) {
			ms[i].To = 0
		}

		if ms[i].Type == raftpb.MsgAppResp {
			if sentAppResp {
				ms[i].To = 0
			} else {
				sentAppResp = true
			}
		}

		if ms[i].Type == raftpb.MsgSnap {
			// There are two separate data stores: the store for v2, and the KV for v3.
			// The msgSnap only contains the most recent snapshot of store without KV.
			// So we need to redirect the msgSnap to etcd server main loop for merging in the
			// current store snapshot and KV snapshot.
			select {
			case r.msgSnapC <- ms[i]:
			default:
				// drop msgSnap if the inflight chan is full.
			}
			ms[i].To = 0
		}
		if ms[i].Type == raftpb.MsgHeartbeat {
			ok, exceed := r.td.Observe(ms[i].To)
			if !ok {
				// TODO: limit request rate.
				plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
				plog.Warningf("server is likely overloaded")
			}
		}
	}
	return ms
}

func (r *raftNode) apply() chan apply {
	return r.applyc
}

func (r *raftNode) stop() {
	r.stopped <- struct{}{}
	<-r.done
}

func (r *raftNode) onStop() {
	r.Stop()
	r.ticker.Stop()
	r.transport.Stop()
	if err := r.storage.Close(); err != nil {
		plog.Panicf("raft close storage error: %v", err)
	}
	close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Pause()
}

func (r *raftNode) resumeSending() {
	p := r.transport.(rafthttp.Pausable)
	p.Resume()
}

// advanceTicksForElection advances ticks to the node for fast election.
// This reduces the time to wait for first leader election if bootstrapping the whole
// cluster, while leaving at least 1 heartbeat for possible existing leader
// to contact it.
func advanceTicksForElection(n raft.Node, electionTicks int) {
	for i := 0; i < electionTicks-1; i++ {
		n.Tick()
	}
}

func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
	var err error
	member := cl.MemberByName(cfg.Name)
	metadata := pbutil.MustMarshal(
		&pb.Metadata{
			NodeID: uint64(member.ID),
			ClusterID: uint64(cl.ID()),
		},
	)
	if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
		plog.Fatalf("create wal error: %v", err)
	}
	peers := make([]raft.Peer, len(ids))
	for i, id := range ids {
		ctx, err := json.Marshal((*cl).Member(id))
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
	}
	id = member.ID
	plog.Infof("starting member %s in cluster %s", id, cl.ID())
	s = raft.NewMemoryStorage()
	c := &raft.Config{
		ID: uint64(id),
		ElectionTick: cfg.ElectionTicks,
		HeartbeatTick: 1,
		Storage: s,
		MaxSizePerMsg: maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum: true,
	}

	n = raft.StartNode(c, peers)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return
}

func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID: uint64(id),
		ElectionTick: cfg.ElectionTicks,
		HeartbeatTick: 1,
		Storage: s,
		MaxSizePerMsg: maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
		CheckQuorum: true,
	}

	n := raft.RestartNode(c)
	raftStatusMu.Lock()
	raftStatus = n.Status
	raftStatusMu.Unlock()
	advanceTicksForElection(n, c.ElectionTick)
	return id, cl, n, s, w
}

func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
	var walsnap walpb.Snapshot
	if snapshot != nil {
		walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
	}
	w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

	// discard the previously uncommitted entries
	for i, ent := range ents {
		if ent.Index > st.Commit {
			plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
			ents = ents[:i]
			break
		}
	}

	// force append the configuration change entries
	toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
	ents = append(ents, toAppEnts...)

	// force commit newly appended entries
	err := w.Save(raftpb.HardState{}, toAppEnts)
	if err != nil {
		plog.Fatalf("%v", err)
	}
	if len(ents) != 0 {
		st.Commit = ents[len(ents)-1].Index
	}

	plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
	cl := membership.NewCluster("")
	cl.SetID(cid)
	s := raft.NewMemoryStorage()
	if snapshot != nil {
		s.ApplySnapshot(*snapshot)
	}
	s.SetHardState(st)
	s.Append(ents)
	c := &raft.Config{
		ID: uint64(id),
		ElectionTick: cfg.ElectionTicks,
		HeartbeatTick: 1,
		Storage: s,
		MaxSizePerMsg: maxSizePerMsg,
		MaxInflightMsgs: maxInflightMsgs,
	}
	n := raft.RestartNode(c)
	raftStatus = n.Status
	return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
	ids := make(map[uint64]bool)
	if snap != nil {
		for _, id := range snap.Metadata.ConfState.Nodes {
			ids[id] = true
		}
	}
	for _, e := range ents {
		if e.Type != raftpb.EntryConfChange {
			continue
		}
		var cc raftpb.ConfChange
		pbutil.MustUnmarshal(&cc, e.Data)
		switch cc.Type {
		case raftpb.ConfChangeAddNode:
			ids[cc.NodeID] = true
		case raftpb.ConfChangeRemoveNode:
			delete(ids, cc.NodeID)
		case raftpb.ConfChangeUpdateNode:
			// do nothing
		default:
			plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
		}
	}
	sids := make(types.Uint64Slice, 0, len(ids))
	for id := range ids {
		sids = append(sids, id)
	}
	sort.Sort(sids)
	return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
	ents := make([]raftpb.Entry, 0)
	next := index + 1
	found := false
	for _, id := range ids {
		if id == self {
			found = true
			continue
		}
		cc := &raftpb.ConfChange{
			Type: raftpb.ConfChangeRemoveNode,
			NodeID: id,
		}
		e := raftpb.Entry{
			Type: raftpb.EntryConfChange,
			Data: pbutil.MustMarshal(cc),
			Term: term,
			Index: next,
		}
		ents = append(ents, e)
		next++
	}
	if !found {
		m := membership.Member{
			ID: types.ID(self),
			RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
		}
		ctx, err := json.Marshal(m)
		if err != nil {
			plog.Panicf("marshal member should never fail: %v", err)
		}
		cc := &raftpb.ConfChange{
			Type: raftpb.ConfChangeAddNode,
			NodeID: self,
			Context: ctx,
		}
		e := raftpb.Entry{
			Type: raftpb.EntryConfChange,
			Data: pbutil.MustMarshal(cc),
			Term: term,
			Index: next,
		}
		ents = append(ents, e)
	}
	return ents
}
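`getIDs` and `createConfigChangeEnts` above boil down to set arithmetic over member IDs: add on ConfChangeAddNode, delete on ConfChangeRemoveNode, then emit the result in a stable order. A small standalone sketch of that set logic with plain uint64s; the `change` type below is invented for illustration, whereas etcd uses `raftpb.ConfChange`:

```go
package main

import (
	"fmt"
	"sort"
)

// change is a stand-in for raftpb.ConfChange: an add or remove of a member ID.
type change struct {
	add bool
	id  uint64
}

// memberIDs folds a sequence of add/remove changes into a sorted ID set,
// mirroring how getIDs above walks snapshot nodes and conf-change entries.
func memberIDs(changes []change) []uint64 {
	set := make(map[uint64]bool)
	for _, c := range changes {
		if c.add {
			set[c.id] = true
		} else {
			delete(set, c.id)
		}
	}
	ids := make([]uint64, 0, len(set))
	for id := range set {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	return ids
}

func main() {
	fmt.Println(memberIDs([]change{{true, 3}, {true, 1}, {true, 2}, {false, 3}})) // [1 2]
}
```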
Some files were not shown because too many files have changed in this diff.