forked from Ivasoft/traefik
Compare commits
36 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 36c0e63120 |  |
|  | 76465727d9 |  |
|  | 41c64ea81b |  |
|  | de9eec1c92 |  |
|  | 482afed4a6 |  |
|  | 29e1e9eef2 |  |
|  | 2641832304 |  |
|  | ccd919aba3 |  |
|  | 1b93551572 |  |
|  | b9af55fc49 |  |
|  | e0d92aed6d |  |
|  | a3372acb6d |  |
|  | 43a510c046 |  |
|  | 7afa33dfa1 |  |
|  | 73c6007730 |  |
|  | 79cd306ac2 |  |
|  | 35b83678bd |  |
|  | eacb6ea15a |  |
|  | d88263dbf9 |  |
|  | b1e3444798 |  |
|  | f6c6d2bcd0 |  |
|  | 593c0e7ce2 |  |
|  | e2b42ca57b |  |
|  | 7860534f0c |  |
|  | fc81d92c88 |  |
|  | 8fbac2e39e |  |
|  | 59f7b2ea98 |  |
|  | 862957c30c |  |
|  | 546f0173ab |  |
|  | 04e3f2f401 |  |
|  | acc432b5a8 |  |
|  | 13e2358815 |  |
|  | 716eca5976 |  |
|  | 9ae808aac4 |  |
|  | f149b56063 |  |
|  | 49a9e2a9e0 |  |
````
@@ -31,7 +31,7 @@ before_deploy:
fi;
curl -sI https://github.com/containous/structor/releases/latest | grep -Fi Location | tr -d '\r' | sed "s/tag/download/g" | awk -F " " '{ print $2 "/structor_linux-amd64"}' | wget --output-document=$GOPATH/bin/structor -i -;
chmod +x $GOPATH/bin/structor;
structor -o containous -r traefik --dockerfile-url="https://raw.githubusercontent.com/containous/traefik/master/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/containous/structor/master/traefik-menu.js.gotmpl" --exp-branch=master --debug;
structor -o containous -r traefik --dockerfile-url="https://raw.githubusercontent.com/containous/traefik/master/docs.Dockerfile" --menu.js-url="https://raw.githubusercontent.com/containous/structor/master/traefik-menu.js.gotmpl" --rqts-url="https://raw.githubusercontent.com/containous/structor/master/requirements-override.txt" --exp-branch=master --debug;
fi
deploy:
- provider: releases
@@ -54,7 +54,7 @@ deploy:
on:
repo: containous/traefik
- provider: pages
edge: true
edge: false
github_token: ${GITHUB_TOKEN}
local_dir: site
skip_cleanup: true
````

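The `curl -sI ... | grep -Fi Location` pipeline in the hunk above resolves the latest structor release by following GitHub's redirect: the `Location` header of `/releases/latest` points at `.../releases/tag/<version>`, and swapping `tag` for `download` yields the base path of the release assets. A minimal standalone sketch of the same idea, assuming bash and an illustrative output path (`/tmp/structor`), not the CI file itself:

```shell
# Resolve the download URL of the latest structor release, then fetch the linux-amd64 asset.
latest=$(curl -sI https://github.com/containous/structor/releases/latest \
  | grep -Fi Location | tr -d '\r' | awk '{print $2}')
curl -L --output /tmp/structor "${latest/tag/download}/structor_linux-amd64"
chmod +x /tmp/structor
```
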
CHANGELOG.md (25 changes)

````
@@ -1,5 +1,30 @@
# Change Log

## [v1.5.4](https://github.com/containous/traefik/tree/v1.5.4) (2018-03-15)
[All Commits](https://github.com/containous/traefik/compare/v1.5.3...v1.5.4)

**Bug fixes:**
- **[acme]** Fix panic when parsing resolv.conf ([#2955](https://github.com/containous/traefik/pull/2955) by [ldez](https://github.com/ldez))
- **[acme]** Don't failed traefik start if register and subscribe failed on acme ([#2977](https://github.com/containous/traefik/pull/2977) by [Juliens](https://github.com/Juliens))
- **[ecs]** Safe access to ECS API pointer values. ([#2983](https://github.com/containous/traefik/pull/2983) by [ldez](https://github.com/ldez))
- **[kv]** Add lower-case passHostHeader key support. ([#3015](https://github.com/containous/traefik/pull/3015) by [ldez](https://github.com/ldez))
- **[middleware]** Propagate insecure in white list. ([#2981](https://github.com/containous/traefik/pull/2981) by [ldez](https://github.com/ldez))
- **[rancher]** Fix Rancher Healthcheck when upgrading a service ([#2962](https://github.com/containous/traefik/pull/2962) by [jmirc](https://github.com/jmirc))
- **[websocket]** Capitalize Sec-WebSocket-Protocol Header ([#2975](https://github.com/containous/traefik/pull/2975) by [Juliens](https://github.com/Juliens))
- Use goroutine pool in throttleProvider ([#3013](https://github.com/containous/traefik/pull/3013) by [Juliens](https://github.com/Juliens))
- Handle quoted strings in UnmarshalJSON ([#3004](https://github.com/containous/traefik/pull/3004) by [Juliens](https://github.com/Juliens))

**Documentation:**
- **[acme]** Clarify some deprecations. ([#2959](https://github.com/containous/traefik/pull/2959) by [ldez](https://github.com/ldez))
- **[acme]** Second defaultEntryPoint should be https, not http. ([#2948](https://github.com/containous/traefik/pull/2948) by [GerbenWelter](https://github.com/GerbenWelter))
- **[api]** Enhance API, REST, ping documentation. ([#2950](https://github.com/containous/traefik/pull/2950) by [ldez](https://github.com/ldez))
- **[k8s]** Add TLS Docs ([#3012](https://github.com/containous/traefik/pull/3012) by [dtomcej](https://github.com/dtomcej))
- Enhance Traefik TOML sample. ([#2996](https://github.com/containous/traefik/pull/2996) by [ldez](https://github.com/ldez))
- Fix typo in docs ([#2990](https://github.com/containous/traefik/pull/2990) by [mo](https://github.com/mo))
- Clarify how setting a frontend priority works ([#2984](https://github.com/containous/traefik/pull/2984) by [jbdoumenjou](https://github.com/jbdoumenjou))
- Add [file] in syntax reference ([#3016](https://github.com/containous/traefik/pull/3016) by [ldez](https://github.com/ldez))
- Updated the test-it example according to the latest docker version ([#3000](https://github.com/containous/traefik/pull/3000) by [geraldcroes](https://github.com/geraldcroes))

## [v1.5.3](https://github.com/containous/traefik/tree/v1.5.3) (2018-02-27)
[All Commits](https://github.com/containous/traefik/compare/v1.5.2...v1.5.3)
````

Gopkg.lock (generated, 28 changes)

````
@@ -214,8 +214,8 @@
".",
"parse"
]
revision = "963366c29a7acc2d6e02f4f9bcf260d5a1cf4968"
version = "v1.1.1"
revision = "b4c2f060875361c070ed2bc300c5929b82f5fa2e"
version = "v1.1.2"

[[projects]]
branch = "master"
@@ -232,8 +232,8 @@
[[projects]]
name = "github.com/containous/traefik-extra-service-fabric"
packages = ["."]
revision = "ca1fb57108293caad285b1c366b763f6c6ab71c9"
version = "v1.0.5"
revision = "dd5326f23d6e529aa327c10ce1f996079f5d7262"
version = "v1.0.6"

[[projects]]
name = "github.com/coreos/bbolt"
@@ -645,9 +645,10 @@
revision = "e444e69cbd2e2e3e0749a2f3c717cec491552bbf"

[[projects]]
branch = "master"
name = "github.com/gorilla/websocket"
packages = ["."]
revision = "a69d9f6de432e2c6b296a947d8a5ee88f68522cf"
revision = "eb925808374e5ca90c83401a40d711dc08c0c0f6"

[[projects]]
name = "github.com/hashicorp/consul"
@@ -810,9 +811,10 @@
source = "https://github.com/containous/mesos-dns.git"

[[projects]]
branch = "master"
name = "github.com/miekg/dns"
packages = ["."]
revision = "8060d9f51305bbe024b99679454e62f552cd0b0b"
revision = "906238edc6eb0ddface4a1923f6d41ef2a5ca59b"

[[projects]]
branch = "master"
@@ -1106,28 +1108,35 @@
packages = [
"bcrypt",
"blowfish",
"ed25519",
"ed25519/internal/edwards25519",
"ocsp",
"pbkdf2",
"scrypt"
]
revision = "4ed45ec682102c643324fae5dff8dab085b6c300"
revision = "b080dc9a8c480b08e698fb1219160d598526310f"

[[projects]]
name = "golang.org/x/net"
packages = [
"bpf",
"context",
"context/ctxhttp",
"http2",
"http2/hpack",
"idna",
"internal/iana",
"internal/socket",
"internal/timeseries",
"ipv4",
"ipv6",
"lex/httplex",
"proxy",
"publicsuffix",
"trace",
"websocket"
]
revision = "c8c74377599bd978aee1cf3b9b63a8634051cec2"
revision = "894f8ed5849b15b810ae41e9590a0d05395bba27"

[[projects]]
name = "golang.org/x/oauth2"
@@ -1232,6 +1241,7 @@
name = "gopkg.in/fsnotify.v1"
packages = ["."]
revision = "629574ca2a5df945712d3079857300b5e4da0236"
source = "github.com/fsnotify/fsnotify"
version = "v1.4.2"

[[projects]]
@@ -1390,6 +1400,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "7122deb7b2056ecfa444c5fb64437c1b0723afae528a331c4b032b841039383b"
inputs-digest = "2211d5f1d7b7137b83976ba0a92441ef7a80c8a858eabb7d9a5102d7b3dfbe4f"
solver-name = "gps-cdcl"
solver-version = 1
````

Gopkg.toml (19 changes)

````
@@ -68,7 +68,7 @@ ignored = ["github.com/sirupsen/logrus"]

[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"
version = "1.0.5"
version = "1.0.6"

[[constraint]]
name = "github.com/coreos/go-systemd"
@@ -162,6 +162,7 @@ ignored = ["github.com/sirupsen/logrus"]

[[constraint]]
name = "gopkg.in/fsnotify.v1"
source = "github.com/fsnotify/fsnotify"
version = "1.4.2"

[[constraint]]
@@ -173,10 +174,6 @@ ignored = ["github.com/sirupsen/logrus"]
revision = "6018b68f96b839edfbe3fb48668853f5dbad88a3"
source = "github.com/ijc25/Gotty"

[[override]]
name = "github.com/gorilla/websocket"
revision = "a69d9f6de432e2c6b296a947d8a5ee88f68522cf"

[[override]]
# always keep this override
name = "github.com/mailgun/timetools"
@@ -191,6 +188,18 @@ ignored = ["github.com/sirupsen/logrus"]
name = "github.com/coreos/bbolt"
revision = "32c383e75ce054674c53b5a07e55de85332aee14"

[[override]]
branch = "master"
name = "github.com/miekg/dns"

[[override]]
name = "golang.org/x/crypto"
revision = "b080dc9a8c480b08e698fb1219160d598526310f"

[[override]]
name = "golang.org/x/net"
revision = "894f8ed5849b15b810ae41e9590a0d05395bba27"

[prune]
non-go = true
go-tests = true
````

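Gopkg.lock is generated, so constraint and override edits like the ones above are normally followed by re-resolving dependencies rather than hand-editing the lock file. A minimal sketch, assuming the golang/dep workflow this repository used at the time:

```shell
# Re-resolve dependencies and regenerate Gopkg.lock from Gopkg.toml
dep ensure

# Or bump a single project, e.g. after dropping the gorilla/websocket override
dep ensure -update github.com/gorilla/websocket
```
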
README.md (215 changes)

````
@@ -12,8 +12,9 @@
[](https://twitter.com/intent/follow?screen_name=traefikproxy)


Træfik (pronounced like _traffic_) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), and a lot more) to manage its configuration automatically and dynamically.
Træfik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy.
Træfik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically.
Telling Træfik where your orchestrator is could be the _only_ configuration step you need to do.

---

@@ -36,60 +37,101 @@ It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](ht

## Overview

Imagine that you have deployed a bunch of microservices on your infrastructure. You probably used a service registry (like etcd or consul) and/or an orchestrator (swarm, Mesos/Marathon) to manage all these services.
If you want your users to access some of your microservices from the Internet, you will have to use a reverse proxy and configure it using virtual hosts or prefix paths:
Imagine that you have deployed a bunch of microservices with the help of an orchestrator (like Swarm or Kubernetes) or a service registry (like etcd or consul).
Now you want users to access these microservices, and you need a reverse proxy.

- domain `api.domain.com` will point the microservice `api` in your private network
- path `domain.com/web` will point the microservice `web` in your private network
- domain `backoffice.domain.com` will point the microservices `backoffice` in your private network, load-balancing between your multiple instances
Traditional reverse-proxies require that you configure _each_ route that will connect paths and subdomains to _each_ microservice.
In an environment where you add, remove, kill, upgrade, or scale your services _many_ times a day, the task of keeping the routes up to date becomes tedious.

Microservices are often deployed in dynamic environments where services are added, removed, killed, upgraded or scaled many times a day.
**This is when Træfik can help you!**

Traditional reverse-proxies are not natively dynamic. You can't change their configuration and hot-reload easily.
Træfik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention from your part.

Here enters Træfik.
**Run Træfik and let it do the work for you!**
_(But if you'd rather configure some of your routes manually, Træfik supports that too!)_

 architecture

Træfik can listen to your service registry/orchestrator API, and knows each time a microservice is added, removed, killed or upgraded, and can generate its configuration automatically.
Routes to your services will be created instantly.

Run it and forget it!


## Features

- [It's fast](https://docs.traefik.io/benchmarks)
- No dependency hell, single binary made with go
- [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image
- Rest API
- Hot-reloading of configuration. No need to restart the process
- Continuously updates its configuration (No restarts!)
- Supports multiple load balancing algorithms
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org)
- Circuit breakers, retry
- Round Robin, rebalancer load-balancers
- Metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB)
- Clean AngularJS Web UI
- Websocket, HTTP/2, GRPC ready
- Access Logs (JSON, CLF)
- [Let's Encrypt](https://letsencrypt.org) support (Automatic HTTPS with renewal)
- [Proxy Protocol](https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) support
- High Availability with cluster mode (beta)
- See the magic through its clean web UI
- Websocket, HTTP/2, GRPC ready
- Provides metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB)
- Keeps access logs (JSON, CLF)
- [Fast](https://docs.traefik.io/benchmarks) ... which is nice
- Exposes a Rest API
- Packaged as a single binary file (made with :heart: with go) and available as a [tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image

## Supported backends

- [Docker](https://www.docker.com/) / [Swarm mode](https://docs.docker.com/engine/swarm/)
- [Kubernetes](https://kubernetes.io)
- [Mesos](https://github.com/apache/mesos) / [Marathon](https://mesosphere.github.io/marathon/)
- [Rancher](https://rancher.com) (API, Metadata)
- [Consul](https://www.consul.io/) / [Etcd](https://coreos.com/etcd/) / [Zookeeper](https://zookeeper.apache.org) / [BoltDB](https://github.com/boltdb/bolt)
- [Eureka](https://github.com/Netflix/eureka)
- [Amazon ECS](https://aws.amazon.com/ecs)
- [Amazon DynamoDB](https://aws.amazon.com/dynamodb)
- File
- Rest API
## Supported Backends

- [Docker](docs/configuration/backends/docker/) / [Swarm mode](docs/configuration/backends/docker/#docker-swarm-mode)
- [Kubernetes](docs/configuration/backends/kubernetes/)
- [Mesos](docs/configuration/backends/mesos/) / [Marathon](docs/configuration/backends/marathon/)
- [Rancher](docs/configuration/backends/rancher/) (API, Metadata)
- [Azure Service Fabric](docs/configuration/backends/servicefabric/)
- [Consul Catalog](docs/configuration/backends/consulcatalog/)
- [Consul](docs/configuration/backends/consul/) / [Etcd](docs/configuration/backends/etcd/) / [Zookeeper](docs/configuration/backends/zookeeper/) / [BoltDB](docs/configuration/backends/boltdb/)
- [Eureka](docs/configuration/backends/eureka/)
- [Amazon ECS](docs/configuration/backends/ecs/)
- [Amazon DynamoDB](docs/configuration/backends/dynamodb/)
- [File](docs/configuration/backends/file/)
- [Rest](docs/configuration/backends/rest/)

## Quickstart

You can have a quick look at Træfik in this [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers. If you are looking for a more comprehensive and real use-case example, you can also check [Play-With-Docker](http://training.play-with-docker.com/traefik-load-balancing/) to see how to load balance between multiple nodes.
To get your hands on Træfik, you can use the [5-Minute Quickstart](http://docs.traefik.io/#the-trfik-quickstart-using-docker) in our documentation (you will need Docker).

Alternatively, if you don't want to install anything on your computer, you can try Træfik online in this great [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers.

If you are looking for a more comprehensive and real use-case example, you can also check [Play-With-Docker](http://training.play-with-docker.com/traefik-load-balancing/) to see how to load balance between multiple nodes.

## Web UI

You can access the simple HTML frontend of Træfik.




## Documentation

You can find the complete documentation at [https://docs.traefik.io](https://docs.traefik.io).
A collection of contributions around Træfik can be found at [https://awesome.traefik.io](https://awesome.traefik.io).

## Support

To get community support, you can:
- join the Træfik community Slack channel: [](https://traefik.herokuapp.com)
- use [Stack Overflow](https://stackoverflow.com/questions/tagged/traefik) (using the `traefik` tag)

If you need commercial support, please contact [Containo.us](https://containo.us) by mail: <mailto:support@containo.us>.

## Download

- Grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):

```shell
./traefik --configFile=traefik.toml
```

- Or use the official tiny Docker image and run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):

```shell
docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/etc/traefik/traefik.toml traefik
```

- Or get the sources:

```shell
git clone https://github.com/containous/traefik
```

## Introductory Videos

Here is a talk given by [Emile Vauge](https://github.com/emilevauge) at [GopherCon 2017](https://gophercon.com/).
You will learn Træfik basics in less than 10 minutes.
@@ -101,81 +143,26 @@ You will learn fundamental Træfik features and see some demos with Kubernetes.

[](https://www.youtube.com/watch?v=aFtpIShV60I)


## Web UI

You can access the simple HTML frontend of Træfik.




## Test it

- The simple way: grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and just run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):

```shell
./traefik --configFile=traefik.toml
```

- Use the tiny Docker image and just run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):

```shell
docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/etc/traefik/traefik.toml traefik
```

- From sources:

```shell
git clone https://github.com/containous/traefik
```


## Documentation

You can find the complete documentation at [https://docs.traefik.io](https://docs.traefik.io).
A collection of contributions around Træfik can be found at [https://awesome.traefik.io](https://awesome.traefik.io).


## Support

To get basic support, you can:
- join the Træfik community Slack channel: [](https://traefik.herokuapp.com)
- use [Stack Overflow](https://stackoverflow.com/questions/tagged/traefik) (using the `traefik` tag)

If you prefer commercial support, please contact [containo.us](https://containo.us) by mail: <mailto:support@containo.us>.


## Release cycle

- Release: We try to release a new version every 2 months
- i.e.: 1.3.0, 1.4.0, 1.5.0
- Release candidate: we do RC (1.**x**.0-rc**y**) before the final release (1.**x**.0)
- i.e.: 1.1.0-rc1 -> 1.1.0-rc2 -> 1.1.0-rc3 -> 1.1.0-rc4 -> 1.1.0
- Bug-fixes: For each version we release bug fixes
- i.e.: 1.1.1, 1.1.2, 1.1.3
- those versions contain only bug-fixes
- no additional features are delivered in those versions
- Each version is supported until the next one is released
- i.e.: 1.1.x will be supported until 1.2.0 is out
- We use [Semantic Versioning](http://semver.org/)


## Contributing

Please refer to [contributing documentation](CONTRIBUTING.md).


### Code of Conduct

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md).
By participating in this project you agree to abide by its terms.


## Maintainers

[Information about process and maintainers](MAINTAINER.md)

## Contributing

If you'd like to contribute to the project, refer to the [contributing documentation](CONTRIBUTING.md).

Please note that this project is released with a [Contributor Code of Conduct](CODE_OF_CONDUCT.md).
By participating in this project, you agree to abide by its terms.

## Release Cycle

- We release a new version (e.g. 1.1.0, 1.2.0, 1.3.0) every other month.
- Release Candidates are available before the release (e.g. 1.1.0-rc1, 1.1.0-rc2, 1.1.0-rc3, 1.1.0-rc4, before 1.1.0)
- Bug-fixes (e.g. 1.1.1, 1.1.2, 1.2.1, 1.2.3) are released as needed (no additional features are delivered in those versions, bug-fixes only)

Each version is supported until the next one is released (e.g. 1.1.x will be supported until 1.2.0 is out)

We use [Semantic Versioning](http://semver.org/)

## Plumbing

@@ -184,11 +171,11 @@ By participating in this project you agree to abide by its terms.
- [Negroni](https://github.com/urfave/negroni): web middlewares made simple
- [Lego](https://github.com/xenolf/lego): the best [Let's Encrypt](https://letsencrypt.org) library in go


## Credits

Kudos to [Peka](http://peka.byethost11.com/photoblog/) for his awesome work on the logo .
Traefik's logo licensed under the Creative Commons 3.0 Attributions license.

Traefik's logo is licensed under the Creative Commons 3.0 Attributions license.

Traefik's logo was inspired by the gopher stickers made by Takuya Ueda (https://twitter.com/tenntenn).
The original Go gopher was designed by Renee French (http://reneefrench.blogspot.com/).
The original Go gopher was designed by Renee French (http://reneefrench.blogspot.com/).
````

acme/acme.go (10 changes)

````
@@ -335,7 +335,7 @@ func (a *ACME) CreateLocalConfig(tlsConfig *tls.Config, certs *safe.Safe, checkO
a.client, err = a.buildACMEClient(account)
if err != nil {
log.Errorf(`Failed to build ACME client: %s
Let's Encrypt functionality will be limited until traefik is restarted.`, err)
Let's Encrypt functionality will be limited until Traefik is restarted.`, err)
return nil
}

@@ -344,7 +344,9 @@ Let's Encrypt functionality will be limited until traefik is restarted.`, err)
log.Info("Register...")
reg, err := a.client.Register()
if err != nil {
return err
log.Errorf(`Failed to register user: %s
Let's Encrypt functionality will be limited until Traefik is restarted.`, err)
return nil
}
account.Registration = reg
}
@@ -357,7 +359,9 @@ Let's Encrypt functionality will be limited until traefik is restarted.`, err)
// Let's Encrypt Subscriber Agreement renew ?
reg, err := a.client.QueryRegistration()
if err != nil {
return err
log.Errorf(`Failed to renew subscriber agreement: %s
Let's Encrypt functionality will be limited until Traefik is restarted.`, err)
return nil
}
account.Registration = reg
err = a.client.AgreeToTOS()
````

````
@@ -575,7 +575,13 @@ var _templatesKvTmpl = []byte(`{{$frontends := List .Prefix "/frontends/" }}
{{$entryPoints := GetList . "/entrypoints"}}
[frontends."{{$frontend}}"]
backend = "{{Get "" . "/backend"}}"
{{ $passHostHeader := Get "" . "/passhostheader"}}
{{if $passHostHeader}}
passHostHeader = {{ $passHostHeader }}
{{else}}
# keep for compatibility reason
passHostHeader = {{Get "true" . "/passHostHeader"}}
{{end}}
priority = {{Get "0" . "/priority"}}
entryPoints = [{{range $entryPoints}}
"{{.}}",
````

````
@@ -1,4 +1,4 @@
FROM alpine
FROM alpine:3.14

ENV PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/.local/bin
````

````
@@ -234,27 +234,26 @@ The following rules are both `Matchers` and `Modifiers`, so the `Matcher` portio
#### Priorities

By default, routes will be sorted (in descending order) using rules length (to avoid path overlap):
`PathPrefix:/12345` will be matched before `PathPrefix:/1234` that will be matched before `PathPrefix:/1`.
`PathPrefix:/foo;Host:foo.com` (length == 28) will be matched before `PathPrefixStrip:/foobar` (length == 23) will be matched before `PathPrefix:/foo,/bar` (length == 20).

You can customize priority by frontend. The priority value is added to the rule length during sorting:
You can customize priority by frontend. The priority value override the rule length during sorting:

```toml
[frontends]
[frontends.frontend1]
backend = "backend1"
priority = 10
priority = 20
passHostHeader = true
[frontends.frontend1.routes.test_1]
rule = "PathPrefix:/to"
[frontends.frontend2]
priority = 5
backend = "backend2"
passHostHeader = true
[frontends.frontend2.routes.test_1]
rule = "PathPrefix:/toto"
```

Here, `frontend1` will be matched before `frontend2` (`(3 + 10 == 13) > (4 + 5 == 9)`).
Here, `frontend1` will be matched before `frontend2` (`20 > 16`).

#### Custom headers
````

@@ -243,7 +243,7 @@ entryPoint = "https"
|
||||
Specify the entryPoint to use during the challenges.
|
||||
|
||||
```toml
|
||||
defaultEntryPoints = ["http", "http"]
|
||||
defaultEntryPoints = ["http", "https"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
@@ -397,11 +397,9 @@ Each domain & SANs will lead to a certificate request.
|
||||
### `dnsProvider` (Deprecated)
|
||||
|
||||
!!! danger "DEPRECATED"
|
||||
This option is deprecated.
|
||||
Please refer to [DNS challenge provider section](/configuration/acme/#provider)
|
||||
This option is deprecated, use [dnsChallenge.provider](/configuration/acme/#acmednschallenge) instead.
|
||||
|
||||
### `delayDontCheckDNS` (Deprecated)
|
||||
|
||||
!!! danger "DEPRECATED"
|
||||
This option is deprecated.
|
||||
Please refer to [DNS challenge delayBeforeCheck section](/configuration/acme/#delaybeforecheck)
|
||||
This option is deprecated, use [dnsChallenge.delayBeforeCheck](/configuration/acme/#acmednschallenge) instead.
|
||||
|
||||
````
@@ -11,14 +11,14 @@
# Default: "traefik"
#
entryPoint = "traefik"


# Enabled Dashboard
#
# Optional
# Default: true
#
dashboard = true


# Enable debug mode.
# This will install HTTP handlers to expose Go expvars under /debug/vars and
# pprof profiling data under /debug/pprof.
@@ -43,7 +43,7 @@ For more customization, see [entry points](/configuration/entrypoints/) document
| Path | Method | Description |
|---|---|---|
| `/` | `GET` | Provides a simple HTML frontend of Træfik |
| `/health` | `GET` | json health metrics |
| `/health` | `GET` | JSON health metrics |
| `/api` | `GET` | Configuration for all providers |
| `/api/providers` | `GET` | Providers |
| `/api/providers/{provider}` | `GET`, `PUT` | Get or update provider (1) |
@@ -62,7 +62,102 @@ For more customization, see [entry points](/configuration/entrypoints/) document
For compatibility reason, when you activate the rest provider, you can use `web` or `rest` as `provider` value.
But be careful, in the configuration for all providers the key is still `web`.

### Provider configurations
### Address / Port

You can define a custom address/port like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[entryPoints.foo]
address = ":8082"

[entryPoints.bar]
address = ":8083"

[ping]
entryPoint = "foo"

[api]
entryPoint = "bar"
```

In the above example, you would access a regular path, administration panel, and health-check as follows:

* Regular path: `http://hostname:80/path`
* Admin Panel: `http://hostname:8083/`
* Ping URL: `http://hostname:8082/ping`

In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
Otherwise, you are likely to expose _all_ services via that entry point.

### Custom Path

You can define a custom path like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[entryPoints.foo]
address = ":8080"

[entryPoints.bar]
address = ":8081"

# Activate API and Dashboard
[api]
entryPoint = "bar"
dashboard = true

[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"

[frontends]
[frontends.frontend1]
entryPoints = ["foo"]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "PathPrefixStrip:/yourprefix;PathPrefix:/yourprefix"
```

### Authentication

You can define the authentication like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[entryPoints.foo]
address=":8080"
[entryPoints.foo.auth]
[entryPoints.foo.auth.basic]
users = [
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
]

[api]
entrypoint="foo"
```

For more information, see [entry points](/configuration/entrypoints/) .

### Provider call example

```shell
curl -s "http://localhost:8080/api" | jq .
````

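The `users` entries in the basic-auth example above are htpasswd-style hashes (APR1/MD5 here; SHA1 and bcrypt are also accepted). A quick way to generate one, assuming the Apache `htpasswd` utility is available and using throwaway credentials for illustration:

```shell
# Print a user:hash pair suitable for the users = [...] list (APR1/MD5 by default)
htpasswd -nb test test

# Or generate a bcrypt hash instead
htpasswd -nbB test test
```
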
````
@@ -145,10 +145,10 @@ To enable constraints see [backend-specific constraints section](/configuration/

## Labels: overriding default behaviour

!!! note
If you use a compose file, labels should be defined in the `deploy` part of your service.
#### Using Docker with Swarm Mode

This behavior is only enabled for docker-compose version 3+ ([Compose file reference](https://docs.docker.com/compose/compose-file/#labels-1)).
If you use a compose file with the Swarm mode, labels should be defined in the `deploy` part of your service.
This behavior is only enabled for docker-compose version 3+ ([Compose file reference](https://docs.docker.com/compose/compose-file/#labels-1)).

```yaml
version: "3"
@@ -159,37 +159,52 @@ services:
traefik.docker.network: traefik
```

#### Using Docker Compose

If you are intending to use only Docker Compose commands (e.g. `docker-compose up --scale whoami=2 -d`), labels should be under your service, otherwise they will be ignored.

```yaml
version: "3"
services:
whoami:
labels:
traefik.docker.network: traefik
```

### On Containers

Labels can be used on containers to override default behaviour.

| Label | Description |
|---|---|
| `traefik.backend=foo` | Give the name `foo` to the generated backend for this container. |
| `traefik.backend.maxconn.amount=10` | Set a maximum number of connections to the backend. Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Set the function to be used against the request to determine what to limit maximum connections to the backend by. Must be used in conjunction with the above label to take effect. |
| `traefik.backend.loadbalancer.method=drr` | Override the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enable backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Manually set the cookie name for sticky sessions |
| `traefik.backend.loadbalancer.sticky=true` | Enable backend sticky sessions (DEPRECATED) |
| `traefik.backend.loadbalancer.swarm=true` | Use Swarm's inbuilt load balancer (only relevant under Swarm Mode). |
| `traefik.backend.circuitbreaker.expression=EXPR` | Create a [circuit breaker](/basics/#backends) to be used against the backend |
| `traefik.port=80` | Register this port. Useful when the container exposes multiples ports. |
| `traefik.protocol=https` | Override the default `http` protocol |
| `traefik.weight=10` | Assign this weight to the container |
| `traefik.enable=false` | Disable this container in Træfik |
| `traefik.frontend.rule=EXPR` | Override the default frontend rule. Default: `Host:{containerName}.{domain}` or `Host:{service}.{project_name}.{domain}` if you are using `docker-compose`. |
| `traefik.frontend.passHostHeader=true` | Forward client `Host` header to the backend. |
| `traefik.frontend.priority=10` | Override default frontend priority |
| `traefik.frontend.entryPoints=http,https` | Assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints` |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` |
| `traefik.frontend.whitelistSourceRange:RANGE` | List of IP-Ranges which are allowed to access. An unset or empty list allows all Source-IPs to access. If one of the Net-Specifications are invalid, the whole list is invalid and allows all Source-IPs to access. |
| `traefik.docker.network` | Set the docker network to use for connections to this container. If a container is linked to several networks, be sure to set the proper network name (you can check with `docker inspect <container_id>`) otherwise it will randomly pick one (depending on how docker is returning them). For instance when deploying docker `stack` from compose files, the compose defined networks will be prefixed with the `stack` name. |
| `traefik.frontend.redirect.entryPoint=https` | Enables Redirect to another entryPoint for that frontend (e.g. HTTPS) |
| `traefik.frontend.redirect.regex=^http://localhost/(.*)` | Redirect to another URL for that frontend. Must be set with `traefik.frontend.redirect.replacement`. |
| `traefik.frontend.redirect.replacement=http://mydomain/$1` | Redirect to another URL for that frontend. Must be set with `traefik.frontend.redirect.regex`. |

| Label | Description |
|---|---|
| `traefik.backend=foo` | Give the name `foo` to the generated backend for this container. |
| `traefik.backend.maxconn.amount=10` | Set a maximum number of connections to the backend. Must be used in conjunction with the below label to take effect. |
| `traefik.backend.maxconn.extractorfunc=client.ip` | Set the function to be used against the request to determine what to limit maximum connections to the backend by. Must be used in conjunction with the above label to take effect. |
| `traefik.backend.loadbalancer.method=drr` | Override the default `wrr` load balancer algorithm |
| `traefik.backend.loadbalancer.stickiness=true` | Enable backend sticky sessions |
| `traefik.backend.loadbalancer.stickiness.cookieName=NAME` | Manually set the cookie name for sticky sessions |
| `traefik.backend.loadbalancer.sticky=true` | Enable backend sticky sessions (DEPRECATED) |
| `traefik.backend.loadbalancer.swarm=true` | Use Swarm's inbuilt load balancer (only relevant under Swarm Mode). |
| `traefik.backend.circuitbreaker.expression=EXPR` | Create a [circuit breaker](/basics/#backends) to be used against the backend |
| `traefik.port=80` | Register this port. Useful when the container exposes multiples ports. |
| `traefik.protocol=https` | Override the default `http` protocol |
| `traefik.weight=10` | Assign this weight to the container |
| `traefik.enable=false` | Disable this container in Træfik |
| `traefik.frontend.rule=EXPR` | Override the default frontend rule. Default: `Host:{containerName}.{domain}` or `Host:{service}.{project_name}.{domain}` if you are using `docker-compose`. |
| `traefik.frontend.passHostHeader=true` | Forward client `Host` header to the backend. |
| `traefik.frontend.priority=10` | Override default frontend priority |
| `traefik.frontend.entryPoints=http,https` | Assign this frontend to entry points `http` and `https`. Overrides `defaultEntryPoints` |
| `traefik.frontend.auth.basic=EXPR` | Sets basic authentication for that frontend in CSV format: `User:Hash,User:Hash` |
| `traefik.frontend.whitelistSourceRange:RANGE` | List of IP-Ranges which are allowed to access. An unset or empty list allows all Source-IPs to access. If one of the Net-Specifications are invalid, the whole list is invalid and allows all Source-IPs to access. |
| `traefik.docker.network` | Set the docker network to use for connections to this container. [1] |
| `traefik.frontend.redirect.entryPoint=https` | Enables Redirect to another entryPoint for that frontend (e.g. HTTPS) |
| `traefik.frontend.redirect.regex=^http://localhost/(.*)` | Redirect to another URL for that frontend. Must be set with `traefik.frontend.redirect.replacement`. |
| `traefik.frontend.redirect.replacement=http://mydomain/$1` | Redirect to another URL for that frontend. Must be set with `traefik.frontend.redirect.regex`. |

[1] `traefik.docker.network`:
If a container is linked to several networks, be sure to set the proper network name (you can check with `docker inspect <container_id>`) otherwise it will randomly pick one (depending on how docker is returning them).
For instance when deploying docker `stack` from compose files, the compose defined networks will be prefixed with the `stack` name.
Or if your service references external network use it's name instead.

#### Security Headers
````

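As the `[1]` footnote above notes, `traefik.docker.network` must name a network that Træfik shares with the container. One way to see which networks a running container is attached to, assuming `jq` is installed and `<container_id>` stands for your container:

```shell
# List the networks the container is attached to; pick the one Træfik is also on.
docker inspect --format '{{json .NetworkSettings.Networks}}' <container_id> | jq 'keys'

# For stack deployments, networks are prefixed with the stack name (e.g. "mystack_traefik").
docker network ls --filter name=traefik
```
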
````
@@ -5,6 +5,8 @@ Træfik can be configured with a file.
## Reference

```toml
[file]

# Backends
[backends]
````

````
@@ -94,6 +94,17 @@ A label selector can be defined to filter on specific Ingress objects only.

See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details.

### TLS communication between Traefik and backend pods

Traefik automatically requests endpoint information based on the service provided in the ingress spec.
Although traefik will connect directly to the endpoints (pods), it still checks the service port to see if TLS communication is required.
If the service port defined in the ingress spec is 443, then the backend communication protocol is assumed to be TLS, and will connect via TLS automatically.

!!! note
Please note that by enabling TLS communication between traefik and your pods, you will have to have trusted certificates that have the proper trust chain and IP subject name.
If this is not an option, you may need to skip TLS certificate verification.
See the [InsecureSkipVerify](/configuration/commons/#main-section) setting for more details.

## Annotations

### General annotations
````

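Since the TLS decision described above is driven purely by the service port being 443, it can be worth confirming which port the ingress actually targets. A hedged example, assuming `kubectl` access and placeholder names `my-service` / `my-ingress`:

```shell
# Ports exposed by the backend service; 443 here means Traefik will dial the pods over TLS
kubectl get service my-service -o jsonpath='{.spec.ports[*].port}'

# Backend service/port pairs referenced by the ingress
kubectl describe ingress my-ingress
```
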
````
@@ -29,9 +29,10 @@ Træfik can be configured:


```shell
curl -XPUT @file "http://localhost:8080/api"
curl -XPUT @file "http://localhost:8080/api/providers/rest"
```
with `@file`

with `@file`:
```json
{
"frontends": {
@@ -88,4 +89,4 @@ with `@file`
}
}
}
```
```
````

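In the `curl -XPUT @file` call above, `@file` stands in for a JSON payload like the one that follows it. Spelled out with explicit flags, the same request could look like this (the `rules.json` filename is only an illustration):

```shell
# PUT a dynamic configuration to the rest provider endpoint
curl -X PUT \
  -H "Content-Type: application/json" \
  --data @rules.json \
  "http://localhost:8080/api/providers/rest"
```
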
````
@@ -1,33 +1,33 @@
# Service Fabric Backend
# Azure Service Fabric Backend

Træfik can be configured to use Service Fabric as a backend configuration.
Træfik can be configured to use Azure Service Fabric as a backend configuration.

See [this repository for an example deployment package and further documentation.](https://aka.ms/traefikonsf)

## Service Fabric
## Azure Service Fabric

```toml
################################################################
# Service Fabric provider
# Azure Service Fabric provider
################################################################

# Enable Service Fabric configuration backend
# Enable Azure Service Fabric configuration backend
[serviceFabric]

# Service Fabric Management Endpoint
# Azure Service Fabric Management Endpoint
#
# Required
#
clusterManagementUrl = "https://localhost:19080"

# Service Fabric Management Endpoint API Version
# Azure Service Fabric Management Endpoint API Version
#
# Required
# Default: "3.0"
#
apiVersion = "3.0"

# Service Fabric Polling Interval (in seconds)
# Azure Service Fabric Polling Interval (in seconds)
#
# Required
# Default: 10
````

````
@@ -386,41 +386,6 @@ curl -s "http://localhost:8080/api" | jq .

### Deprecation compatibility

#### Path

As the web provider is deprecated, you can handle the `Path` option like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[entryPoints.dashboard]
address = ":8080"

[entryPoints.api]
address = ":8081"

# Activate API and Dashboard
[api]
entryPoint = "api"

[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"

[frontends]
[frontends.frontend1]
entryPoints = ["dashboard"]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "PathPrefixStrip:/yourprefix;PathPrefix:/yourprefix"
```

#### Address

As the web provider is deprecated, you can handle the `Address` option like this:
@@ -432,28 +397,64 @@ defaultEntryPoints = ["http"]
[entryPoints.http]
address = ":80"

[entryPoints.ping]
[entryPoints.foo]
address = ":8082"

[entryPoints.api]
[entryPoints.bar]
address = ":8083"

[ping]
entryPoint = "ping"
entryPoint = "foo"

[api]
entryPoint = "api"
entryPoint = "bar"
```

In the above example, you would access a regular path, administration panel, and health-check as follows:

* Regular path: `http://hostname:80/foo`
* Regular path: `http://hostname:80/path`
* Admin Panel: `http://hostname:8083/`
* Ping URL: `http://hostname:8082/ping`

In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
Otherwise, you are likely to expose _all_ services via that entry point.

#### Path

As the web provider is deprecated, you can handle the `Path` option like this:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[entryPoints.foo]
address = ":8080"

[entryPoints.bar]
address = ":8081"

# Activate API and Dashboard
[api]
entryPoint = "bar"
dashboard = true

[file]
[backends]
[backends.backend1]
[backends.backend1.servers.server1]
url = "http://127.0.0.1:8081"

[frontends]
[frontends.frontend1]
entryPoints = ["foo"]
backend = "backend1"
[frontends.frontend1.routes.test_1]
rule = "PathPrefixStrip:/yourprefix;PathPrefix:/yourprefix"
```

#### Authentication

As the web provider is deprecated, you can handle the `auth` option like this:
@@ -465,17 +466,17 @@ defaultEntryPoints = ["http"]
[entryPoints.http]
address = ":80"

[entryPoints.api]
[entryPoints.foo]
address=":8080"
[entryPoints.api.auth]
[entryPoints.api.auth.basic]
[entryPoints.foo.auth]
[entryPoints.foo.auth.basic]
users = [
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
]

[api]
entrypoint="api"
entrypoint="foo"
```

For more information, see [entry points](/configuration/entrypoints/) .
````

````
@@ -21,24 +21,67 @@
!!! warning
Even if you have authentication configured on entry point, the `/ping` path of the api is excluded from authentication.

## Example
## Examples

The `/ping` health-check URL is enabled with the command-line `--ping` or config file option `[ping]`.
Thus, if you have a regular path for `/foo` and an entrypoint on `:80`, you would access them as follows:

* Regular path: `http://hostname:80/foo`
* Admin panel: `http://hostname:8080/`
* Ping URL: `http://hostname:8080/ping`

However, for security reasons, you may want to be able to expose the `/ping` health-check URL to outside health-checkers, e.g. an Internet service or cloud load-balancer, _without_ exposing your administration panel's port.
In many environments, the security staff may not _allow_ you to expose it.

You have two options:

* Enable `/ping` on a regular entry point
* Enable `/ping` on a dedicated port

### Ping health check on a regular entry point

To proxy `/ping` from a regular entry point to the administration one without exposing the panel, do the following:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"

[ping]
entryPoint = "http"

```shell
curl -sv "http://localhost:8080/ping"
```
```shell
* Trying ::1...
* Connected to localhost (::1) port 8080 (#0)
> GET /ping HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Date: Thu, 25 Aug 2016 01:35:36 GMT
< Content-Length: 2
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
OK
```

The above link `ping` on the `http` entry point and then expose it on port `80`

### Enable ping health check on dedicated port

If you do not want to or cannot expose the health-check on a regular entry point - e.g. your security rules do not allow it, or you have a conflicting path - then you can enable health-check on its own entry point.
Use the following configuration:

```toml
defaultEntryPoints = ["http"]

[entryPoints]
[entryPoints.http]
address = ":80"
[entryPoints.ping]
address = ":8082"

[ping]
entryPoint = "ping"
```

The above is similar to the previous example, but instead of enabling `/ping` on the _default_ entry point, we enable it on a _dedicated_ entry point.

In the above example, you would access a regular path and health-check as follows:

* Regular path: `http://hostname:80/foo`
* Ping URL: `http://hostname:8082/ping`

Note the dedicated port `:8082` for `/ping`.

In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
Otherwise, you are likely to expose _all_ services via this entry point.
````

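Either variant above leaves an unauthenticated `/ping` endpoint that external checkers can poll. A minimal probe, assuming the dedicated-port setup on `:8082` (a cloud load-balancer health check or a cron-driven monitor would do the equivalent):

```shell
# Returns the body "OK" and exit code 0 while Traefik is healthy; -f makes curl fail on HTTP errors
curl -f --max-time 2 "http://hostname:8082/ping"
```
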
docs/index.md (288 changes)

````
@@ -10,65 +10,165 @@
[](https://twitter.com/intent/follow?screen_name=traefikproxy)


Træfik (pronounced like _traffic_) is a modern HTTP reverse proxy and load balancer made to deploy microservices with ease.
It supports several backends ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), and a lot more) to manage its configuration automatically and dynamically.
Træfik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy.
Træfik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically.
Telling Træfik where your orchestrator is could be the _only_ configuration step you need to do.

## Overview

Imagine that you have deployed a bunch of microservices on your infrastructure. You probably used a service registry (like etcd or consul) and/or an orchestrator (swarm, Mesos/Marathon) to manage all these services.
If you want your users to access some of your microservices from the Internet, you will have to use a reverse proxy and configure it using virtual hosts or prefix paths:
Imagine that you have deployed a bunch of microservices with the help of an orchestrator (like Swarm or Kubernetes) or a service registry (like etcd or consul).
Now you want users to access these microservices, and you need a reverse proxy.

- domain `api.domain.com` will point the microservice `api` in your private network
- path `domain.com/web` will point the microservice `web` in your private network
- domain `backoffice.domain.com` will point the microservices `backoffice` in your private network, load-balancing between your multiple instances
Traditional reverse-proxies require that you configure _each_ route that will connect paths and subdomains to _each_ microservice. In an environment where you add, remove, kill, upgrade, or scale your services _many_ times a day, the task of keeping the routes up to date becomes tedious.

Microservices are often deployed in dynamic environments where services are added, removed, killed, upgraded or scaled many times a day.
**This is when Træfik can help you!**

Traditional reverse-proxies are not natively dynamic. You can't change their configuration and hot-reload easily.
Træfik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention from your part.

Here enters Træfik.
**Run Træfik and let it do the work for you!**
_(But if you'd rather configure some of your routes manually, Træfik supports that too!)_



Træfik can listen to your service registry/orchestrator API, and knows each time a microservice is added, removed, killed or upgraded, and can generate its configuration automatically.
Routes to your services will be created instantly.

Run it and forget it!

## Features

- [It's fast](/benchmarks)
- No dependency hell, single binary made with go
- [Tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image
- Rest API
- Hot-reloading of configuration. No need to restart the process
- Continuously updates its configuration (No restarts!)
- Supports multiple load balancing algorithms
- Provides HTTPS to your microservices by leveraging [Let's Encrypt](https://letsencrypt.org)
- Circuit breakers, retry
- Round Robin, rebalancer load-balancers
- Metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB)
- Clean AngularJS Web UI
- High Availability with cluster mode (beta)
- See the magic through its clean web UI
- Websocket, HTTP/2, GRPC ready
- Access Logs (JSON, CLF)
- [Let's Encrypt](https://letsencrypt.org) support (Automatic HTTPS with renewal)
- High Availability with cluster mode
- Provides metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB)
- Keeps access logs (JSON, CLF)
- [Fast](/benchmarks) ... which is nice
- Exposes a Rest API
- Packaged as a single binary file (made with :heart: with go) and available as a [tiny](https://microbadger.com/images/traefik) [official](https://hub.docker.com/r/_/traefik/) docker image


## Supported backends

- [Docker](https://www.docker.com/) / [Swarm mode](https://docs.docker.com/engine/swarm/)
- [Kubernetes](https://kubernetes.io)
- [Mesos](https://github.com/apache/mesos) / [Marathon](https://mesosphere.github.io/marathon/)
- [Rancher](https://rancher.com) (API, Metadata)
- [Consul](https://www.consul.io/) / [Etcd](https://coreos.com/etcd/) / [Zookeeper](https://zookeeper.apache.org) / [BoltDB](https://github.com/boltdb/bolt)
- [Eureka](https://github.com/Netflix/eureka)
- [Amazon ECS](https://aws.amazon.com/ecs)
- [Amazon DynamoDB](https://aws.amazon.com/dynamodb)
- File
- Rest API
- [Docker](/configuration/backends/docker/) / [Swarm mode](/configuration/backends/docker/#docker-swarm-mode)
- [Kubernetes](/configuration/backends/kubernetes/)
- [Mesos](/configuration/backends/mesos/) / [Marathon](/configuration/backends/marathon/)
- [Rancher](/configuration/backends/rancher/) (API, Metadata)
- [Azure Service Fabric](/configuration/backends/servicefabric/)
- [Consul Catalog](/configuration/backends/consulcatalog/)
- [Consul](/configuration/backends/consul/) / [Etcd](/configuration/backends/etcd/) / [Zookeeper](/configuration/backends/zookeeper/) / [BoltDB](/configuration/backends/boltdb/)
- [Eureka](/configuration/backends/eureka/)
- [Amazon ECS](/configuration/backends/ecs/)
- [Amazon DynamoDB](/configuration/backends/dynamodb/)
- [File](/configuration/backends/file/)
- [Rest](/configuration/backends/rest/)

## The Træfik Quickstart (Using Docker)

## Quickstart
In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure.

You can have a quick look at Træfik in this [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers.
To save some time, you can clone [Træfik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory.

### 1 — Launch Træfik — Tell It to Listen to Docker

Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Træfik image:

```yaml
version: '3'

services:
reverse-proxy:
image: traefik #The official Traefik docker image
command: --api --docker #Enables the web UI and tells Træfik to listen to docker
ports:
- "80:80" #The HTTP port
- "8080:8080" #The Web UI (enabled by --api)
volumes:
- /var/run/docker.sock:/var/run/docker.sock #So that Traefik can listen to the Docker events
````

|
||||
```
|
||||
|
||||
**That's it. Now you can launch Træfik!**
|
||||
|
||||
Start your `reverse-proxy` with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d reverse-proxy
|
||||
```
|
||||
|
||||
You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Træfik's dashboard (we'll go back there once we have launched a service in step 2).
|
||||
|
||||
### 2 — Launch a Service — Træfik Detects It and Creates a Route for You
|
||||
|
||||
Now that we have a Træfik instance up and running, we will deploy new services.
|
||||
|
||||
Edit your `docker-compose.yml` file and add the following at the end of your file.
|
||||
|
||||
```yaml
|
||||
# ...
|
||||
whoami:
|
||||
image: emilevauge/whoami #A container that exposes an API to show its IP address
|
||||
labels:
|
||||
- "traefik.frontend.rule=Host:whoami.docker.localhost"
|
||||
```
|
||||
|
||||
The above defines `whoami`: a simple web service that outputs information about the machine it is deployed on (its IP address, host, and so on).
|
||||
|
||||
Start the `whoami` service with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d whoami
|
||||
```
|
||||
|
||||
Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new container and updated its own configuration.
|
||||
|
||||
When Traefik detects new services, it creates the corresponding routes so you can call them ... _let's see!_ (Here, we're using curl)
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
_Shows the following output:_
|
||||
```yaml
|
||||
Hostname: 8656c8ddca6c
|
||||
IP: 172.27.0.3
|
||||
#...
|
||||
```
|
||||
|
||||
### 3 — Launch More Instances — Traefik Load Balances Them
|
||||
|
||||
Run more instances of your `whoami` service with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d --scale whoami=2
|
||||
```
|
||||
|
||||
Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new instance of the container.
|
||||
|
||||
Finally, see that Træfik load-balances between the two instances of your service by running the following command twice:
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
The output will alternately show one of the following:
|
||||
|
||||
```yaml
|
||||
Hostname: 8656c8ddca6c
|
||||
IP: 172.27.0.3
|
||||
#...
|
||||
```
|
||||
|
||||
```yaml
|
||||
Hostname: 8458f154e1f1
|
||||
IP: 172.27.0.4
|
||||
# ...
|
||||
```
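
If you would rather script this check than run `curl` by hand, here is a minimal Go sketch that does the same thing. It assumes Træfik is published on port 80 of `127.0.0.1` and that `whoami` carries the `Host:whoami.docker.localhost` rule, exactly as in the compose file above; nothing else is taken from Træfik itself.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	for i := 0; i < 4; i++ {
		req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1/", nil)
		if err != nil {
			panic(err)
		}
		// Setting req.Host has the same effect as `curl -H Host:whoami.docker.localhost`.
		req.Host = "whoami.docker.localhost"

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()

		// Print only the Hostname line, to see which instance answered.
		for _, line := range strings.Split(string(body), "\n") {
			if strings.HasPrefix(line, "Hostname:") {
				fmt.Println(line)
			}
		}
	}
}
```

Each run should show the hostnames alternating between the two containers, which is Træfik's round-robin at work.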
|
||||
|
||||
### 4 — Enjoy Træfik's Magic
|
||||
|
||||
Now that you have a basic understanding of how Træfik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/) and let Træfik work for you! Whatever your infrastructure is, there is probably [an available Træfik backend](https://docs.traefik.io/configuration/backends/available) that will do the job.
|
||||
|
||||
Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Træfik's let's encrypt integration](https://docs.traefik.io/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/user-guide/docker-and-lets-encrypt/).
|
||||
|
||||
## Resources
|
||||
|
||||
Here is a talk given by [Emile Vauge](https://github.com/emilevauge) at [GopherCon 2017](https://gophercon.com).
|
||||
You will learn Træfik basics in less than 10 minutes.
|
||||
@@ -80,9 +180,9 @@ You will learn fundamental Træfik features and see some demos with Kubernetes.
|
||||
|
||||
[](https://www.youtube.com/watch?v=aFtpIShV60I)
|
||||
|
||||
## Get it
|
||||
## Downloads
|
||||
|
||||
### Binary
|
||||
### The Official Binary File
|
||||
|
||||
You can grab the latest binary from the [releases](https://github.com/containous/traefik/releases) page and just run it with the [sample configuration file](https://raw.githubusercontent.com/containous/traefik/master/traefik.sample.toml):
|
||||
|
||||
@@ -90,114 +190,10 @@ You can grab the latest binary from the [releases](https://github.com/containous
|
||||
./traefik -c traefik.toml
|
||||
```
|
||||
|
||||
### Docker
|
||||
### The Official Docker Image
|
||||
|
||||
Using the tiny Docker image:
|
||||
|
||||
```shell
|
||||
docker run -d -p 8080:8080 -p 80:80 -v $PWD/traefik.toml:/etc/traefik/traefik.toml traefik
|
||||
```
|
||||
|
||||
## Test it
|
||||
|
||||
You can test Træfik easily using [Docker compose](https://docs.docker.com/compose), with this `docker-compose.yml` file in a folder named `traefik`:
|
||||
|
||||
```yaml
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
proxy:
|
||||
image: traefik
|
||||
command: --api --docker --docker.domain=docker.localhost --logLevel=DEBUG
|
||||
networks:
|
||||
- webgateway
|
||||
ports:
|
||||
- "80:80"
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /dev/null:/traefik.toml
|
||||
|
||||
networks:
|
||||
webgateway:
|
||||
driver: bridge
|
||||
```
|
||||
|
||||
Start it from within the `traefik` folder:
|
||||
|
||||
```shell
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
In a browser, you may open [http://localhost:8080](http://localhost:8080) to access Træfik's dashboard and observe the following magic.
|
||||
|
||||
Now, create a folder named `test` and create a `docker-compose.yml` in it with this content:
|
||||
|
||||
```yaml
|
||||
version: '2'
|
||||
|
||||
services:
|
||||
whoami:
|
||||
image: emilevauge/whoami
|
||||
networks:
|
||||
- web
|
||||
labels:
|
||||
- "traefik.backend=whoami"
|
||||
- "traefik.frontend.rule=Host:whoami.docker.localhost"
|
||||
|
||||
networks:
|
||||
web:
|
||||
external:
|
||||
name: traefik_webgateway
|
||||
```
|
||||
|
||||
Then, start and scale it in the `test` folder:
|
||||
|
||||
```shell
|
||||
docker-compose up -d
|
||||
docker-compose scale whoami=2
|
||||
```
|
||||
|
||||
Finally, test load-balancing between the two services `test_whoami_1` and `test_whoami_2`:
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
```yaml
|
||||
Hostname: ef194d07634a
|
||||
IP: 127.0.0.1
|
||||
IP: ::1
|
||||
IP: 172.17.0.4
|
||||
IP: fe80::42:acff:fe11:4
|
||||
GET / HTTP/1.1
|
||||
Host: 172.17.0.4:80
|
||||
User-Agent: curl/7.35.0
|
||||
Accept: */*
|
||||
Accept-Encoding: gzip
|
||||
X-Forwarded-For: 172.17.0.1
|
||||
X-Forwarded-Host: 172.17.0.4:80
|
||||
X-Forwarded-Proto: http
|
||||
X-Forwarded-Server: dbb60406010d
|
||||
```
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
```yaml
|
||||
Hostname: 6c3c5df0c79a
|
||||
IP: 127.0.0.1
|
||||
IP: ::1
|
||||
IP: 172.17.0.3
|
||||
IP: fe80::42:acff:fe11:3
|
||||
GET / HTTP/1.1
|
||||
Host: 172.17.0.3:80
|
||||
User-Agent: curl/7.35.0
|
||||
Accept: */*
|
||||
Accept-Encoding: gzip
|
||||
X-Forwarded-For: 172.17.0.1
|
||||
X-Forwarded-Host: 172.17.0.3:80
|
||||
X-Forwarded-Proto: http
|
||||
X-Forwarded-Server: dbb60406010d
|
||||
```
|
||||
```
|
||||
@@ -50,7 +50,7 @@ version: '2'
|
||||
|
||||
services:
|
||||
traefik:
|
||||
image: traefik:1.3.5
|
||||
image: traefik:1.5.4
|
||||
restart: always
|
||||
ports:
|
||||
- 80:80
|
||||
|
||||
@@ -155,7 +155,7 @@ This configuration allows generating a Let's Encrypt certificate (thanks to `HTT
|
||||
!!! note
|
||||
This option simplifies the configuration but :
|
||||
|
||||
* TLS handshakes will be slow when requesting a host name certificate for the first time, this can leads to DDoS attacks.
|
||||
* TLS handshakes will be slow when requesting a hostname certificate for the first time, which can lead to DDoS attacks.
|
||||
* Let's Encrypt has rate limiting: https://letsencrypt.org/docs/rate-limits

That's why it's better to use the `onHostRule` option if possible.
|
||||
@@ -335,68 +335,3 @@ providersThrottleDuration = "5s"
|
||||
[respondingTimeouts]
|
||||
idleTimeout = "360s"
|
||||
```
|
||||
|
||||
## Ping Health Check
|
||||
|
||||
The `/ping` health-check URL is enabled with the command-line option `--ping` or the configuration file option `[ping]`.
|
||||
Thus, if you have a regular path for `/foo` and an entrypoint on `:80`, you would access them as follows:
|
||||
|
||||
* Regular path: `http://hostname:80/foo`
|
||||
* Admin panel: `http://hostname:8080/`
|
||||
* Ping URL: `http://hostname:8080/ping`
|
||||
|
||||
However, for security reasons, you may want to be able to expose the `/ping` health-check URL to outside health-checkers, e.g. an Internet service or cloud load-balancer, _without_ exposing your administration panel's port.
|
||||
In many environments, the security staff may not _allow_ you to expose it.
|
||||
|
||||
You have two options:
|
||||
|
||||
* Enable `/ping` on a regular entry point
|
||||
* Enable `/ping` on a dedicated port
|
||||
|
||||
### Enable ping health check on a regular entry point
|
||||
|
||||
To proxy `/ping` from a regular entry point to the administration one without exposing the panel, do the following:
|
||||
|
||||
```toml
|
||||
defaultEntryPoints = ["http"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
|
||||
[ping]
|
||||
entryPoint = "http"
|
||||
|
||||
```
|
||||
|
||||
The above links `ping` to the `http` entry point and exposes it on port `80`.
|
||||
|
||||
### Enable ping health check on dedicated port
|
||||
|
||||
If you do not want to or cannot expose the health-check on a regular entry point - e.g. your security rules do not allow it, or you have a conflicting path - then you can enable health-check on its own entry point.
|
||||
Use the following configuration:
|
||||
|
||||
```toml
|
||||
defaultEntryPoints = ["http"]
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
[entryPoints.ping]
|
||||
address = ":8082"
|
||||
|
||||
[ping]
|
||||
entryPoint = "ping"
|
||||
```
|
||||
|
||||
The above is similar to the previous example, but instead of enabling `/ping` on the _default_ entry point, we enable it on a _dedicated_ entry point.
|
||||
|
||||
In the above example, you would access a regular path and health-check as follows:
|
||||
|
||||
* Regular path: `http://hostname:80/foo`
|
||||
* Ping URL: `http://hostname:8082/ping`
|
||||
|
||||
Note the dedicated port `:8082` for `/ping`.
|
||||
|
||||
In the above example, it is _very_ important to create a named dedicated entry point, and do **not** include it in `defaultEntryPoints`.
|
||||
Otherwise, you are likely to expose _all_ services via this entry point.
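
An external checker then only needs to poll that URL. Below is a minimal Go sketch of such a checker, written against the dedicated-port example above; the `hostname` and the 10-second interval are placeholders you would replace with your own values.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Placeholder: point this at the dedicated ping entry point (":8082" above).
	const pingURL = "http://hostname:8082/ping"
	client := &http.Client{Timeout: 2 * time.Second}

	for {
		resp, err := client.Get(pingURL)
		switch {
		case err != nil:
			fmt.Println("traefik unreachable:", err)
		case resp.StatusCode != http.StatusOK:
			fmt.Println("traefik unhealthy, status:", resp.StatusCode)
			resp.Body.Close()
		default:
			fmt.Println("traefik OK")
			resp.Body.Close()
		}
		time.Sleep(10 * time.Second)
	}
}
```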
|
||||
|
||||
@@ -184,9 +184,9 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
args:
|
||||
- -d
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
|
||||
@@ -328,7 +328,7 @@ And there, the same dynamic configuration in a KV Store (using `prefix = "traefi
|
||||
| Key | Value |
|
||||
|----------------------------------------------------|--------------------|
|
||||
| `/traefik/frontends/frontend2/backend` | `backend1` |
|
||||
| `/traefik/frontends/frontend2/passHostHeader` | `true` |
|
||||
| `/traefik/frontends/frontend2/passhostheader` | `true` |
|
||||
| `/traefik/frontends/frontend2/priority` | `10` |
|
||||
| `/traefik/frontends/frontend2/entrypoints` | `http,https` |
|
||||
| `/traefik/frontends/frontend2/routes/test_2/rule` | `PathPrefix:/test` |
|
||||
|
||||
@@ -5,11 +5,15 @@ traefikLogsFile = "log/traefik.log"
|
||||
accessLogsFile = "log/access.log"
|
||||
logLevel = "DEBUG"
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.api]
|
||||
address = ":7888"
|
||||
|
||||
################################################################
|
||||
# Web configuration backend
|
||||
# API configuration
|
||||
################################################################
|
||||
[web]
|
||||
address = ":7888"
|
||||
[api]
|
||||
entryPoint = "api"
|
||||
|
||||
################################################################
|
||||
# File configuration backend
|
||||
|
||||
@@ -5,11 +5,15 @@ traefikLogsFile = "log/traefik.log"
|
||||
accessLogsFile = "log/access.log"
|
||||
logLevel = "DEBUG"
|
||||
|
||||
[entryPoints]
|
||||
[entryPoints.api]
|
||||
address = ":7888"
|
||||
|
||||
################################################################
|
||||
# Web configuration backend
|
||||
# API configuration
|
||||
################################################################
|
||||
[web]
|
||||
address = ":7888"
|
||||
[api]
|
||||
entryPoint = "api"
|
||||
|
||||
################################################################
|
||||
# File configuration backend
|
||||
|
||||
@@ -11,7 +11,6 @@ defaultEntryPoints = ["http", "https"]
|
||||
address = ":443"
|
||||
[entryPoints.https.tls]
|
||||
|
||||
|
||||
[acme]
|
||||
email = "test@traefik.io"
|
||||
storage = "/etc/traefik/conf/acme.json"
|
||||
@@ -19,12 +18,10 @@ entryPoint = "https"
|
||||
onDemand = false
|
||||
OnHostRule = true
|
||||
caServer = "http://traefik.localhost.com:4000/directory"
|
||||
[acme.httpChallenge]
|
||||
entryPoint="http"
|
||||
[acme.httpChallenge]
|
||||
entryPoint="http"
|
||||
|
||||
|
||||
[web]
|
||||
address = ":8080"
|
||||
[api]
|
||||
|
||||
[docker]
|
||||
endpoint = "unix:///var/run/docker.sock"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
traefik:
|
||||
image: traefik
|
||||
command: --web --rancher --rancher.domain=rancher.localhost --rancher.endpoint=http://example.com --rancher.accesskey=XXXXXXX --rancher.secretkey=YYYYYY --logLevel=DEBUG
|
||||
command: --api --rancher --rancher.domain=rancher.localhost --rancher.endpoint=http://example.com --rancher.accesskey=XXXXXXX --rancher.secretkey=YYYYYY --logLevel=DEBUG
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
traefik:
|
||||
image: traefik
|
||||
command: -c /dev/null --web --docker --docker.domain=docker.localhost --logLevel=DEBUG
|
||||
command: -c /dev/null --api --docker --docker.domain=docker.localhost --logLevel=DEBUG
|
||||
ports:
|
||||
- "80:80"
|
||||
- "8080:8080"
|
||||
|
||||
@@ -29,8 +29,9 @@ spec:
|
||||
- image: traefik
|
||||
name: traefik-ingress-lb
|
||||
args:
|
||||
- --web
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
|
||||
@@ -34,9 +34,9 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
args:
|
||||
- -d
|
||||
- --web
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
|
||||
106
examples/quickstart/README.md
Normal file
@@ -0,0 +1,106 @@
|
||||
## The Træfik Quickstart (Using Docker)
|
||||
|
||||
In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure.
|
||||
|
||||
To save some time, you can clone [Træfik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory.
|
||||
|
||||
### 1 — Launch Træfik — Tell It to Listen to Docker
|
||||
|
||||
Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Træfik image:
|
||||
|
||||
```yaml
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
reverse-proxy:
|
||||
image: traefik #The official Traefik docker image
|
||||
command: --api --docker #Enables the web UI and tells Træfik to listen to docker
|
||||
ports:
|
||||
- "80:80" #The HTTP port
|
||||
- "8080:8080" #The Web UI (enabled by --api)
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock #So that Traefik can listen to the Docker events
|
||||
```
|
||||
|
||||
**That's it. Now you can launch Træfik!**
|
||||
|
||||
Start your `reverse-proxy` with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d reverse-proxy
|
||||
```
|
||||
|
||||
You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Træfik's dashboard (we'll go back there once we have launched a service in step 2).
|
||||
|
||||
### 2 — Launch a Service — Træfik Detects It and Creates a Route for You
|
||||
|
||||
Now that we have a Træfik instance up and running, we will deploy new services.
|
||||
|
||||
Edit your `docker-compose.yml` file and add the following at the end of your file.
|
||||
|
||||
```yaml
|
||||
# ...
|
||||
whoami:
|
||||
image: emilevauge/whoami #A container that exposes an API to show its IP address
|
||||
labels:
|
||||
- "traefik.frontend.rule=Host:whoami.docker.localhost"
|
||||
```
|
||||
|
||||
The above defines `whoami`: a simple web service that outputs information about the machine it is deployed on (its IP address, host, and so on).
|
||||
|
||||
Start the `whoami` service with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d whoami
|
||||
```
|
||||
|
||||
Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new container and updated its own configuration.
|
||||
|
||||
When Traefik detects new services, it creates the corresponding routes so you can call them ... _let's see!_ (Here, we're using curl)
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
_Shows the following output:_
|
||||
```yaml
|
||||
Hostname: 8656c8ddca6c
|
||||
IP: 172.27.0.3
|
||||
#...
|
||||
```
|
||||
|
||||
### 3 — Launch More Instances — Traefik Load Balances Them
|
||||
|
||||
Run more instances of your `whoami` service with the following command:
|
||||
|
||||
```shell
|
||||
docker-compose up -d --scale whoami=2
|
||||
```
|
||||
|
||||
Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new instance of the container.
|
||||
|
||||
Finally, see that Træfik load-balances between the two instances of your service by running the following command twice:
|
||||
|
||||
```shell
|
||||
curl -H Host:whoami.docker.localhost http://127.0.0.1
|
||||
```
|
||||
|
||||
The output will alternately show one of the following:
|
||||
|
||||
```yaml
|
||||
Hostname: 8656c8ddca6c
|
||||
IP: 172.27.0.3
|
||||
#...
|
||||
```
|
||||
|
||||
```yaml
|
||||
Hostname: 8458f154e1f1
|
||||
IP: 172.27.0.4
|
||||
# ...
|
||||
```
|
||||
|
||||
### 4 — Enjoy Træfik's Magic
|
||||
|
||||
Now that you have a basic understanding of how Træfik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/) and let Træfik work for you! Whatever your infrastructure is, there is probably [an available Træfik backend](https://docs.traefik.io/configuration/backends/available) that will do the job.
|
||||
|
||||
Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Træfik's let's encrypt integration](https://docs.traefik.io/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/user-guide/docker-and-lets-encrypt/).
|
||||
18
examples/quickstart/docker-compose.yml
Normal file
@@ -0,0 +1,18 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
#The reverse proxy service (Træfik)
|
||||
reverse-proxy:
|
||||
image: traefik #The official Traefik docker image
|
||||
command: --api --docker #Enables the web UI and tells Træfik to listen to docker
|
||||
ports:
|
||||
- "80:80" #The HTTP port
|
||||
- "8080:8080" #The Web UI (enabled by --api)
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock #So that Traefik can listen to the Docker events
|
||||
|
||||
#A container that exposes a simple API
|
||||
whoami:
|
||||
image: emilevauge/whoami #A container that exposes an API to show its IP address
|
||||
labels:
|
||||
- "traefik.frontend.rule=Host:whoami.docker.localhost"
|
||||
78
integration/docker_compose_test.go
Normal file
@@ -0,0 +1,78 @@
|
||||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/containous/traefik/integration/try"
|
||||
"github.com/containous/traefik/testhelpers"
|
||||
"github.com/containous/traefik/types"
|
||||
"github.com/go-check/check"
|
||||
checker "github.com/vdemeester/shakers"
|
||||
)
|
||||
|
||||
const (
|
||||
composeProject = "minimal"
|
||||
)
|
||||
|
||||
// Docker test suites
|
||||
type DockerComposeSuite struct {
|
||||
BaseSuite
|
||||
}
|
||||
|
||||
func (s *DockerComposeSuite) SetUpSuite(c *check.C) {
|
||||
s.createComposeProject(c, composeProject)
|
||||
s.composeProject.Start(c)
|
||||
}
|
||||
|
||||
func (s *DockerComposeSuite) TearDownSuite(c *check.C) {
|
||||
// shutdown and delete compose project
|
||||
if s.composeProject != nil {
|
||||
s.composeProject.Stop(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DockerComposeSuite) TestComposeScale(c *check.C) {
|
||||
var serviceCount = 2
|
||||
var composeService = "whoami1"
|
||||
|
||||
s.composeProject.Scale(c, composeService, serviceCount)
|
||||
|
||||
file := s.adaptFileForHost(c, "fixtures/docker/simple.toml")
|
||||
defer os.Remove(file)
|
||||
|
||||
cmd, display := s.traefikCmd(withConfigFile(file))
|
||||
defer display(c)
|
||||
err := cmd.Start()
|
||||
c.Assert(err, checker.IsNil)
|
||||
defer cmd.Process.Kill()
|
||||
|
||||
req := testhelpers.MustNewRequest(http.MethodGet, "http://127.0.0.1:8000/whoami", nil)
|
||||
req.Host = "my.super.host"
|
||||
|
||||
_, err = try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
resp, err := http.Get("http://127.0.0.1:8080/api/providers/docker")
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
defer resp.Body.Close()
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
c.Assert(err, checker.IsNil)
|
||||
|
||||
var provider types.Configuration
|
||||
c.Assert(json.Unmarshal(body, &provider), checker.IsNil)
|
||||
|
||||
// check that we have only one backend with n servers
|
||||
c.Assert(provider.Backends, checker.HasLen, 1)
|
||||
|
||||
myBackend := provider.Backends["backend-"+composeService+"-integrationtest"+composeProject]
|
||||
c.Assert(myBackend, checker.NotNil)
|
||||
c.Assert(myBackend.Servers, checker.HasLen, serviceCount)
|
||||
|
||||
// check that we have only one frontend
|
||||
c.Assert(provider.Frontends, checker.HasLen, 1)
|
||||
}
|
||||
@@ -17,8 +17,8 @@ entryPoint = "https"
|
||||
onDemand = {{.OnDemand}}
|
||||
OnHostRule = {{.OnHostRule}}
|
||||
caServer = "http://{{.BoulderHost}}:4000/directory"
|
||||
[acme.httpchallenge]
|
||||
entrypoint="http"
|
||||
[acme.httpchallenge]
|
||||
entrypoint="http"
|
||||
|
||||
[file]
|
||||
|
||||
|
||||
@@ -20,8 +20,8 @@ entryPoint = "https"
|
||||
onDemand = {{.OnDemand}}
|
||||
OnHostRule = {{.OnHostRule}}
|
||||
caServer = "http://{{.BoulderHost}}:4000/directory"
|
||||
[acme.httpchallenge]
|
||||
entrypoint="http"
|
||||
[acme.httpchallenge]
|
||||
entrypoint="http"
|
||||
|
||||
[file]
|
||||
|
||||
|
||||
@@ -11,6 +11,6 @@
|
||||
|
||||
[[tls]]
|
||||
entryPoints = ["https"]
|
||||
[tls.certificate]
|
||||
certFile = "fixtures/acme/ssl/wildcard.crt"
|
||||
keyFile = "fixtures/acme/ssl/wildcard.key"
|
||||
[tls.certificate]
|
||||
certFile = "fixtures/acme/ssl/wildcard.crt"
|
||||
keyFile = "fixtures/acme/ssl/wildcard.key"
|
||||
4
integration/resources/compose/minimal.yml
Normal file
@@ -0,0 +1,4 @@
|
||||
whoami1:
|
||||
image: emilevauge/whoami
|
||||
labels:
|
||||
- traefik.frontend.rule=PathPrefix:/whoami
|
||||
@@ -16,9 +16,6 @@ theme:
|
||||
include_sidebar: true
|
||||
favicon: img/traefik.icon.png
|
||||
logo: img/traefik.logo.png
|
||||
palette:
|
||||
primary: 'blue'
|
||||
accent: 'light blue'
|
||||
feature:
|
||||
tabs: false
|
||||
palette:
|
||||
@@ -83,7 +80,7 @@ pages:
|
||||
- 'Backend: Mesos': 'configuration/backends/mesos.md'
|
||||
- 'Backend: Rancher': 'configuration/backends/rancher.md'
|
||||
- 'Backend: Rest': 'configuration/backends/rest.md'
|
||||
- 'Backend: Service Fabric': 'configuration/backends/servicefabric.md'
|
||||
- 'Backend: Azure Service Fabric': 'configuration/backends/servicefabric.md'
|
||||
- 'Backend: Zookeeper': 'configuration/backends/zookeeper.md'
|
||||
- 'API / Dashboard': 'configuration/api.md'
|
||||
- 'Ping': 'configuration/ping.md'
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
@@ -135,7 +136,7 @@ func getChangedIntKeys(currState []int, prevState []int) ([]int, []int) {
|
||||
return fun.Keys(addedKeys).([]int), fun.Keys(removedKeys).([]int)
|
||||
}
|
||||
|
||||
func (p *CatalogProvider) watchHealthState(stopCh <-chan struct{}, watchCh chan<- map[string][]string, errorCh chan<- error) {
|
||||
func (p *CatalogProvider) watchHealthState(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) {
|
||||
health := p.client.Health()
|
||||
catalog := p.client.Catalog()
|
||||
|
||||
@@ -156,7 +157,7 @@ func (p *CatalogProvider) watchHealthState(stopCh <-chan struct{}, watchCh chan<
|
||||
healthyState, meta, err := health.State("passing", options)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to retrieve health checks")
|
||||
errorCh <- err
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -180,7 +181,7 @@ func (p *CatalogProvider) watchHealthState(stopCh <-chan struct{}, watchCh chan<
|
||||
data, _, err := catalog.Services(&api.QueryOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services: %s", err)
|
||||
errorCh <- err
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -214,7 +215,7 @@ type Service struct {
|
||||
Ports []int
|
||||
}
|
||||
|
||||
func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh chan<- map[string][]string, errorCh chan<- error) {
|
||||
func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) {
|
||||
catalog := p.client.Catalog()
|
||||
|
||||
safe.Go(func() {
|
||||
@@ -233,7 +234,7 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
|
||||
data, meta, err := catalog.Services(options)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to list services: %s", err)
|
||||
errorCh <- err
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -249,7 +250,7 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
|
||||
nodes, _, err := catalog.Service(key, "", &api.QueryOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get detail of service %s: %s", key, err)
|
||||
errorCh <- err
|
||||
notifyError(err)
|
||||
return
|
||||
}
|
||||
nodesID := getServiceIds(nodes)
|
||||
@@ -572,8 +573,15 @@ func (p *CatalogProvider) watch(configurationChan chan<- types.ConfigMessage, st
|
||||
watchCh := make(chan map[string][]string)
|
||||
errorCh := make(chan error)
|
||||
|
||||
p.watchHealthState(stopCh, watchCh, errorCh)
|
||||
p.watchCatalogServices(stopCh, watchCh, errorCh)
|
||||
var errorOnce sync.Once
|
||||
notifyError := func(err error) {
|
||||
errorOnce.Do(func() {
|
||||
errorCh <- err
|
||||
})
|
||||
}
|
||||
|
||||
p.watchHealthState(stopCh, watchCh, notifyError)
|
||||
p.watchCatalogServices(stopCh, watchCh, notifyError)
|
||||
|
||||
defer close(stopCh)
|
||||
defer close(watchCh)
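
The hunk above stops the health and catalog watchers from writing straight into the shared `errorCh`; instead they call a `notifyError` closure guarded by `sync.Once`, so only the first failure is published and later failures cannot block on a channel that is no longer read. A stripped-down sketch of the same pattern (names here are illustrative, not Træfik's):

```go
package main

import (
	"fmt"
	"sync"
)

// notifyOnce wraps an error channel so that only the first error is ever
// delivered; later callers return immediately instead of blocking on a
// channel nobody reads any more.
func notifyOnce(errCh chan<- error) func(error) {
	var once sync.Once
	return func(err error) {
		once.Do(func() { errCh <- err })
	}
}

func main() {
	// Buffered so the first notification never blocks; in Træfik the channel
	// is drained by the watch loop instead.
	errCh := make(chan error, 1)
	notify := notifyOnce(errCh)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			notify(fmt.Errorf("watcher %d failed", i)) // only one of these lands in errCh
		}(i)
	}
	wg.Wait()

	fmt.Println(<-errCh)
}
```

Because the `Once` fires at most once, a second failing watcher simply returns instead of hanging, which is what lets the provider tear the watch down cleanly.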
|
||||
|
||||
@@ -369,11 +369,12 @@ func (p *Provider) loadDockerConfig(containersInspected []dockerData) *types.Con
|
||||
servers := map[string][]dockerData{}
|
||||
serviceNames := make(map[string]struct{})
|
||||
for idx, container := range filteredContainers {
|
||||
if _, exists := serviceNames[container.ServiceName]; !exists {
|
||||
serviceNameKey := getServiceNameKey(container, p.SwarmMode)
|
||||
if _, exists := serviceNames[serviceNameKey]; !exists {
|
||||
frontendName := p.getFrontendName(container, idx)
|
||||
frontends[frontendName] = append(frontends[frontendName], container)
|
||||
if len(container.ServiceName) > 0 {
|
||||
serviceNames[container.ServiceName] = struct{}{}
|
||||
if len(serviceNameKey) > 0 {
|
||||
serviceNames[serviceNameKey] = struct{}{}
|
||||
}
|
||||
}
|
||||
backendName := getBackend(container)
|
||||
@@ -403,6 +404,16 @@ func (p *Provider) loadDockerConfig(containersInspected []dockerData) *types.Con
|
||||
return configuration
|
||||
}
|
||||
|
||||
func getServiceNameKey(container dockerData, swarmMode bool) string {
|
||||
serviceNameKey := container.ServiceName
|
||||
|
||||
if len(container.Labels[labelDockerComposeProject]) > 0 && len(container.Labels[labelDockerComposeService]) > 0 && !swarmMode {
|
||||
serviceNameKey = container.Labels[labelDockerComposeService] + container.Labels[labelDockerComposeProject]
|
||||
}
|
||||
|
||||
return serviceNameKey
|
||||
}
|
||||
|
||||
// Regexp used to extract the name of the service and the name of the property for this service
|
||||
// All properties are under the format traefik.<servicename>.frontent.*= except the port/weight/protocol directly after traefik.<servicename>.
|
||||
var servicesPropertiesRegexp = regexp.MustCompile(`^traefik\.(?P<service_name>.+?)\.(?P<property_name>port|weight|protocol|frontend\.(.*))$`)
|
||||
|
||||
@@ -852,6 +852,94 @@ func TestDockerLoadDockerConfig(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
containers: []docker.ContainerJSON{
|
||||
containerJSON(
|
||||
name("test_0"),
|
||||
labels(map[string]string{
|
||||
labelDockerComposeProject: "myProject",
|
||||
labelDockerComposeService: "myService",
|
||||
}),
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
withNetwork("bridge", ipv4("127.0.0.1")),
|
||||
),
|
||||
containerJSON(
|
||||
name("test_1"),
|
||||
labels(map[string]string{
|
||||
labelDockerComposeProject: "myProject",
|
||||
labelDockerComposeService: "myService",
|
||||
}),
|
||||
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
|
||||
withNetwork("bridge", ipv4("127.0.0.2")),
|
||||
),
|
||||
containerJSON(
|
||||
name("test_2"),
|
||||
labels(map[string]string{
|
||||
labelDockerComposeProject: "myProject",
|
||||
labelDockerComposeService: "myService2",
|
||||
}),
|
||||
|
||||
ports(nat.PortMap{
|
||||
"80/tcp": {},
|
||||
}),
|
||||
|
||||
withNetwork("bridge", ipv4("127.0.0.3")),
|
||||
),
|
||||
},
|
||||
expectedFrontends: map[string]*types.Frontend{
|
||||
"frontend-Host-myService-myProject-docker-localhost-0": {
|
||||
Backend: "backend-myService-myProject",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
BasicAuth: []string{},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-Host-myService-myProject-docker-localhost-0": {
|
||||
Rule: "Host:myService.myProject.docker.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
"frontend-Host-myService2-myProject-docker-localhost-2": {
|
||||
Backend: "backend-myService2-myProject",
|
||||
PassHostHeader: true,
|
||||
EntryPoints: []string{},
|
||||
BasicAuth: []string{},
|
||||
Routes: map[string]types.Route{
|
||||
"route-frontend-Host-myService2-myProject-docker-localhost-2": {
|
||||
Rule: "Host:myService2.myProject.docker.localhost",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedBackends: map[string]*types.Backend{
|
||||
"backend-myService2-myProject": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test_2": {
|
||||
URL: "http://127.0.0.3:80",
|
||||
Weight: 0,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
"backend-myService-myProject": {
|
||||
Servers: map[string]types.Server{
|
||||
"server-test_0": {
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 0,
|
||||
}, "server-test_1": {
|
||||
URL: "http://127.0.0.2:80",
|
||||
Weight: 0,
|
||||
},
|
||||
},
|
||||
CircuitBreaker: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for caseID, test := range testCases {
|
||||
|
||||
@@ -308,12 +308,12 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||
byTaskDefinition := make(map[string]int)
|
||||
|
||||
for _, task := range tasks {
|
||||
if _, found := byContainerInstance[*task.ContainerInstanceArn]; !found {
|
||||
byContainerInstance[*task.ContainerInstanceArn] = len(containerInstanceArns)
|
||||
if _, found := byContainerInstance[aws.StringValue(task.ContainerInstanceArn)]; !found {
|
||||
byContainerInstance[aws.StringValue(task.ContainerInstanceArn)] = len(containerInstanceArns)
|
||||
containerInstanceArns = append(containerInstanceArns, task.ContainerInstanceArn)
|
||||
}
|
||||
if _, found := byTaskDefinition[*task.TaskDefinitionArn]; !found {
|
||||
byTaskDefinition[*task.TaskDefinitionArn] = len(taskDefinitionArns)
|
||||
if _, found := byTaskDefinition[aws.StringValue(task.TaskDefinitionArn)]; !found {
|
||||
byTaskDefinition[aws.StringValue(task.TaskDefinitionArn)] = len(taskDefinitionArns)
|
||||
taskDefinitionArns = append(taskDefinitionArns, task.TaskDefinitionArn)
|
||||
}
|
||||
}
|
||||
@@ -327,11 +327,10 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, task := range tasks {
|
||||
|
||||
machineIdx := byContainerInstance[*task.ContainerInstanceArn]
|
||||
taskDefIdx := byTaskDefinition[*task.TaskDefinitionArn]
|
||||
machineIdx := byContainerInstance[aws.StringValue(task.ContainerInstanceArn)]
|
||||
taskDefIdx := byTaskDefinition[aws.StringValue(task.TaskDefinitionArn)]
|
||||
|
||||
for _, container := range task.Containers {
|
||||
|
||||
@@ -345,8 +344,8 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||
}
|
||||
|
||||
instances = append(instances, ecsInstance{
|
||||
fmt.Sprintf("%s-%s", strings.Replace(*task.Group, ":", "-", 1), *container.Name),
|
||||
(*task.TaskArn)[len(*task.TaskArn)-12:],
|
||||
fmt.Sprintf("%s-%s", strings.Replace(aws.StringValue(task.Group), ":", "-", 1), *container.Name),
|
||||
(aws.StringValue(task.TaskArn))[len(aws.StringValue(task.TaskArn))-12:],
|
||||
task,
|
||||
taskDefinition,
|
||||
container,
|
||||
@@ -381,7 +380,7 @@ func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, cl
|
||||
|
||||
containerResp := req.Data.(*ecs.DescribeContainerInstancesOutput)
|
||||
for i, container := range containerResp.ContainerInstances {
|
||||
order[*container.Ec2InstanceId] = order[*container.ContainerInstanceArn]
|
||||
order[aws.StringValue(container.Ec2InstanceId)] = order[aws.StringValue(container.ContainerInstanceArn)]
|
||||
instanceIds[i] = container.Ec2InstanceId
|
||||
}
|
||||
}
|
||||
@@ -399,7 +398,7 @@ func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, cl
|
||||
for _, r := range instancesResp.Reservations {
|
||||
for _, i := range r.Instances {
|
||||
if i.InstanceId != nil {
|
||||
instances[order[*i.InstanceId]] = i
|
||||
instances[order[aws.StringValue(i.InstanceId)]] = i
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -426,7 +425,7 @@ func (p *Provider) lookupTaskDefinitions(ctx context.Context, client *awsClient,
|
||||
|
||||
func (p *Provider) label(i ecsInstance, k string) string {
|
||||
if v, found := i.containerDefinition.DockerLabels[k]; found {
|
||||
return *v
|
||||
return aws.StringValue(v)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -565,14 +564,14 @@ func (p *Provider) getProtocol(i ecsInstance) string {
|
||||
}
|
||||
|
||||
func (p *Provider) getHost(i ecsInstance) string {
|
||||
return *i.machine.PrivateIpAddress
|
||||
return aws.StringValue(i.machine.PrivateIpAddress)
|
||||
}
|
||||
|
||||
func (p *Provider) getPort(i ecsInstance) string {
|
||||
if port := p.label(i, types.LabelPort); port != "" {
|
||||
return port
|
||||
}
|
||||
return strconv.FormatInt(*i.container.NetworkBindings[0].HostPort, 10)
|
||||
return strconv.FormatInt(aws.Int64Value(i.container.NetworkBindings[0].HostPort), 10)
|
||||
}
|
||||
|
||||
func (p *Provider) getWeight(i ecsInstance) string {
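
The hunks above swap direct dereferences of AWS SDK pointer fields (`*task.ContainerInstanceArn`, `*i.machine.PrivateIpAddress`, ...) for the SDK's safe accessors `aws.StringValue` and `aws.Int64Value`, which tolerate `nil`. A minimal sketch of why that matters; the ARN literal is made up, and it assumes `aws-sdk-go` is available as it is in this repository's vendor tree:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	var arn *string // fields in ECS API responses are pointers and may be nil

	// A direct dereference would panic on nil:
	// fmt.Println(*arn) // runtime error: invalid memory address

	// aws.StringValue returns the zero value instead of panicking.
	fmt.Printf("%q\n", aws.StringValue(arn)) // ""

	arn = aws.String("arn:aws:ecs:eu-west-1:123456789012:task/example") // hypothetical ARN
	fmt.Printf("%q\n", aws.StringValue(arn))
}
```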
|
||||
|
||||
@@ -283,7 +283,7 @@ func containerFilter(name, healthState, state string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if state != "" && state != "running" && state != "updating-running" {
|
||||
if state != "" && state != "running" && state != "updating-running" && state != "upgraded" {
|
||||
log.Debugf("Filtering container %s with state of %s", name, state)
|
||||
return false
|
||||
}
|
||||
@@ -319,7 +319,7 @@ func (p *Provider) serviceFilter(service rancherData) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if service.State != "" && service.State != "active" && service.State != "updating-active" && service.State != "upgraded" {
|
||||
if service.State != "" && service.State != "active" && service.State != "updating-active" && service.State != "upgraded" && service.State != "upgrading" {
|
||||
log.Debugf("Filtering service %s with state of %s", service.Name, service.State)
|
||||
return false
|
||||
}
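
The two filters above keep growing their chains of `state != "..."` comparisons ("upgraded", and now "upgrading"). If the list keeps growing, an allow-list map may read more easily; this is only a sketch of that alternative, not the provider's actual code:

```go
package main

import "fmt"

// allowedContainerStates mirrors the filter above: anything not in this set
// (and not empty) is skipped by the provider.
var allowedContainerStates = map[string]bool{
	"running":          true,
	"updating-running": true,
	"upgraded":         true,
}

func keepContainer(state string) bool {
	return state == "" || allowedContainerStates[state]
}

func main() {
	for _, s := range []string{"running", "upgraded", "stopped", ""} {
		fmt.Printf("%-12q keep=%v\n", s, keepContainer(s))
	}
}
```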
|
||||
|
||||
@@ -1,4 +1,39 @@
|
||||
mkdocs>=0.17.2
|
||||
pymdown-extensions>=1.4
|
||||
mkdocs-bootswatch>=0.4.0
|
||||
mkdocs-material>=2.2.6
|
||||
mkdocs==0.17.5
|
||||
pymdown-extensions==4.12
|
||||
mkdocs-bootswatch==0.5.0
|
||||
mkdocs-material==2.9.4
|
||||
|
||||
appdirs==1.4.4
|
||||
CacheControl==0.12.6
|
||||
certifi==2020.12.5
|
||||
chardet==4.0.0
|
||||
click==8.1.3
|
||||
colorama==0.4.4
|
||||
contextlib2==0.6.0
|
||||
distlib==0.3.1
|
||||
distro==1.5.0
|
||||
html5lib==1.1
|
||||
idna==3.2
|
||||
importlib-metadata==4.12.0
|
||||
Jinja2==3.1.2
|
||||
livereload==2.6.3
|
||||
lockfile==0.12.2
|
||||
Markdown==3.3.7
|
||||
MarkupSafe==2.1.1
|
||||
msgpack==1.0.2
|
||||
ordered-set==4.0.2
|
||||
packaging==20.9
|
||||
pep517==0.10.0
|
||||
progress==1.5
|
||||
Pygments==2.12.0
|
||||
pymdown-extensions==4.12
|
||||
pyparsing==2.4.7
|
||||
PyYAML==6.0
|
||||
requests==2.25.1
|
||||
retrying==1.3.3
|
||||
six==1.15.0
|
||||
toml==0.10.2
|
||||
tornado==4.5.3
|
||||
urllib3==1.26.5
|
||||
webencodings==0.5.1
|
||||
zipp==3.8.1
|
||||
|
||||
@@ -367,7 +367,7 @@ func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
|
||||
providerConfigUpdateCh = make(chan types.ConfigMessage)
|
||||
s.providerConfigUpdateMap[configMsg.ProviderName] = providerConfigUpdateCh
|
||||
s.routinesPool.Go(func(stop chan bool) {
|
||||
throttleProviderConfigReload(providersThrottleDuration, s.configurationValidatedChan, providerConfigUpdateCh, stop)
|
||||
s.throttleProviderConfigReload(providersThrottleDuration, s.configurationValidatedChan, providerConfigUpdateCh, stop)
|
||||
})
|
||||
}
|
||||
providerConfigUpdateCh <- configMsg
|
||||
@@ -378,11 +378,11 @@ func (s *Server) preLoadConfiguration(configMsg types.ConfigMessage) {
|
||||
// It will immediately publish a new configuration and then only publish the next configuration after the throttle duration.
|
||||
// Note that in the case it receives N new configs in the timeframe of the throttle duration after publishing,
|
||||
// it will publish the last of the newly received configurations.
|
||||
func throttleProviderConfigReload(throttle time.Duration, publish chan<- types.ConfigMessage, in <-chan types.ConfigMessage, stop chan bool) {
|
||||
func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish chan<- types.ConfigMessage, in <-chan types.ConfigMessage, stop chan bool) {
|
||||
ring := channels.NewRingChannel(1)
|
||||
defer ring.Close()
|
||||
|
||||
safe.Go(func() {
|
||||
s.routinesPool.Go(func(stop chan bool) {
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
|
||||
@@ -297,7 +297,10 @@ func TestThrottleProviderConfigReload(t *testing.T) {
|
||||
stop <- true
|
||||
}()
|
||||
|
||||
go throttleProviderConfigReload(throttleDuration, publishConfig, providerConfig, stop)
|
||||
globalConfig := configuration.GlobalConfiguration{}
|
||||
server := NewServer(globalConfig)
|
||||
|
||||
go server.throttleProviderConfigReload(throttleDuration, publishConfig, providerConfig, stop)
|
||||
|
||||
publishedConfigCount := 0
|
||||
stopConsumeConfigs := make(chan bool)
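
The two hunks above turn `throttleProviderConfigReload` into a method and run its inner loop through `s.routinesPool.Go`, so the throttling goroutines share the server's stop channel instead of being fire-and-forget. A toy version of such a pool, shown only to illustrate the lifecycle idea (this is not Træfik's `safe.Pool`):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Pool runs goroutines that all receive the same stop channel, so the owner
// can shut every routine down, and wait for them, with a single call.
type Pool struct {
	stop chan bool
	wg   sync.WaitGroup
}

func NewPool() *Pool { return &Pool{stop: make(chan bool)} }

// Go starts fn in its own goroutine and hands it the shared stop channel.
func (p *Pool) Go(fn func(stop chan bool)) {
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()
		fn(p.stop)
	}()
}

// Stop signals every routine and waits until they have all returned.
func (p *Pool) Stop() {
	close(p.stop)
	p.wg.Wait()
}

func main() {
	pool := NewPool()
	pool.Go(func(stop chan bool) {
		for {
			select {
			case <-stop:
				fmt.Println("throttler stopped cleanly")
				return
			case <-time.After(100 * time.Millisecond):
				fmt.Println("tick")
			}
		}
	})

	time.Sleep(350 * time.Millisecond)
	pool.Stop()
}
```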
|
||||
|
||||
@@ -53,7 +53,13 @@
|
||||
{{$entryPoints := GetList . "/entrypoints"}}
|
||||
[frontends."{{$frontend}}"]
|
||||
backend = "{{Get "" . "/backend"}}"
|
||||
{{ $passHostHeader := Get "" . "/passhostheader"}}
|
||||
{{if $passHostHeader}}
|
||||
passHostHeader = {{ $passHostHeader }}
|
||||
{{else}}
|
||||
# keep for compatibility reason
|
||||
passHostHeader = {{Get "true" . "/passHostHeader"}}
|
||||
{{end}}
|
||||
priority = {{Get "0" . "/priority"}}
|
||||
entryPoints = [{{range $entryPoints}}
|
||||
"{{.}}",
|
||||
|
||||
@@ -71,7 +71,7 @@ func derCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]by
|
||||
}
|
||||
|
||||
if expiration.IsZero() {
|
||||
expiration = time.Now().Add(365)
|
||||
expiration = time.Now().Add(365 * (24 * time.Hour))
|
||||
}
|
||||
|
||||
template := x509.Certificate{
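
The one-character-looking bug fixed above is worth spelling out: `time.Duration` counts nanoseconds, so `time.Now().Add(365)` moves the clock forward by 365 nanoseconds, not 365 days. A quick check:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	fmt.Println(now.Add(365).Sub(now))                  // 365ns — a bare literal is nanoseconds
	fmt.Println(now.Add(365 * 24 * time.Hour).Sub(now)) // 8760h0m0s — roughly one year
}
```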
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
# Optional
|
||||
# Default: "ERROR"
|
||||
#
|
||||
# logLevel = "ERROR"
|
||||
# logLevel = "DEBUG"
|
||||
|
||||
# Entrypoints to be used by frontends that do not specify any entrypoint.
|
||||
# Each frontend can specify its own entrypoints.
|
||||
@@ -24,6 +24,10 @@
|
||||
#
|
||||
# defaultEntryPoints = ["http", "https"]
|
||||
|
||||
################################################################
|
||||
# Entrypoints configuration
|
||||
################################################################
|
||||
|
||||
# Entrypoints definition
|
||||
#
|
||||
# Optional
|
||||
@@ -32,6 +36,10 @@
|
||||
[entryPoints.http]
|
||||
address = ":80"
|
||||
|
||||
################################################################
|
||||
# Traefik logs configuration
|
||||
################################################################
|
||||
|
||||
# Traefik logs
|
||||
# Enabled by default and log to stdout
|
||||
#
|
||||
@@ -54,6 +62,10 @@
|
||||
#
|
||||
# format = "common"
|
||||
|
||||
################################################################
|
||||
# Access logs configuration
|
||||
################################################################
|
||||
|
||||
# Enable access logs
|
||||
# By default it will write to stdout and produce logs in the textual
|
||||
# Common Log Format (CLF), extended with additional fields.
|
||||
@@ -78,17 +90,39 @@
|
||||
# format = "common"
|
||||
|
||||
################################################################
|
||||
# Web configuration backend
|
||||
# API and dashboard configuration
|
||||
################################################################
|
||||
|
||||
# Enable web configuration backend
|
||||
[web]
|
||||
# Enable API and dashboard
|
||||
[api]
|
||||
|
||||
# Web administration port
|
||||
#
|
||||
# Required
|
||||
#
|
||||
address = ":8080"
|
||||
# Name of the related entry point
|
||||
#
|
||||
# Optional
|
||||
# Default: "traefik"
|
||||
#
|
||||
# entryPoint = "traefik"
|
||||
|
||||
# Enabled Dashboard
|
||||
#
|
||||
# Optional
|
||||
# Default: true
|
||||
#
|
||||
# dashboard = false
|
||||
|
||||
################################################################
|
||||
# Ping configuration
|
||||
################################################################
|
||||
|
||||
# Enable ping
|
||||
[ping]
|
||||
|
||||
# Name of the related entry point
|
||||
#
|
||||
# Optional
|
||||
# Default: "traefik"
|
||||
#
|
||||
# entryPoint = "traefik"
|
||||
|
||||
################################################################
|
||||
# Docker configuration backend
|
||||
|
||||
14
vendor/github.com/containous/flaeg/parse/parse.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package parse
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"reflect"
|
||||
@@ -203,6 +204,11 @@ func (d *Duration) UnmarshalText(text []byte) error {
|
||||
return d.Set(string(text))
|
||||
}
|
||||
|
||||
// MarshalJSON serializes the given duration value.
|
||||
func (d *Duration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(time.Duration(*d))
|
||||
}
|
||||
|
||||
// UnmarshalJSON deserializes the given text into a duration value.
|
||||
func (d *Duration) UnmarshalJSON(text []byte) error {
|
||||
if v, err := strconv.Atoi(string(text)); err == nil {
|
||||
@@ -210,7 +216,13 @@ func (d *Duration) UnmarshalJSON(text []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
v, err := time.ParseDuration(string(text))
|
||||
// We use json unmarshal on value because we have the quoted version
|
||||
var value string
|
||||
err := json.Unmarshal(text, &value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
v, err := time.ParseDuration(value)
|
||||
*d = Duration(v)
|
||||
return err
|
||||
}
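
The new `UnmarshalJSON` above has to cope with two encodings: a bare integer (nanoseconds) and a quoted duration string such as `"5s"`, which must first be unquoted via `json.Unmarshal` into a string before `time.ParseDuration` can read it. A self-contained sketch of the same idea (this is a simplified stand-in, not the vendored flaeg type):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration accepts either a raw integer (nanoseconds) or a quoted Go
// duration string such as "5s".
type Duration time.Duration

func (d *Duration) UnmarshalJSON(text []byte) error {
	// Integer form: 5000000000
	var n int64
	if err := json.Unmarshal(text, &n); err == nil {
		*d = Duration(n)
		return nil
	}
	// Quoted form: "5s" — unmarshal into a string first to strip the quotes,
	// then parse it as a duration.
	var s string
	if err := json.Unmarshal(text, &s); err != nil {
		return err
	}
	v, err := time.ParseDuration(s)
	*d = Duration(v)
	return err
}

func main() {
	var a, b Duration
	_ = json.Unmarshal([]byte(`"5s"`), &a)
	_ = json.Unmarshal([]byte(`5000000000`), &b)
	fmt.Println(time.Duration(a), time.Duration(b)) // 5s 5s
}
```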
|
||||
|
||||
16
vendor/github.com/containous/traefik-extra-service-fabric/servicefabric_tmpl.go
generated
vendored
@@ -58,7 +58,7 @@ const tmpl = `
|
||||
{{range $replica := $partition.Replicas}}
|
||||
{{if isPrimary $replica}}
|
||||
|
||||
{{$backendName := getBackendName $service.Name $partition}}
|
||||
{{$backendName := getBackendName $service $partition}}
|
||||
[backends."{{$backendName}}".servers."{{$replica.ID}}"]
|
||||
url = "{{getDefaultEndpoint $replica}}"
|
||||
weight = 1
|
||||
@@ -126,13 +126,15 @@ const tmpl = `
|
||||
{{range $partition := $service.Partitions}}
|
||||
{{$partitionId := $partition.PartitionInformation.ID}}
|
||||
|
||||
{{if hasLabel $service "frontend.rule"}}
|
||||
[frontends."{{$service.Name}}/{{$partitionId}}"]
|
||||
backend = "{{getBackendName $service.Name $partition}}"
|
||||
[frontends."{{$service.Name}}/{{$partitionId}}".routes.default]
|
||||
rule = {{getLabelValue $service "frontend.rule.partition.$partitionId" ""}}
|
||||
{{ $rule := getLabelValue $service (print "frontend.rule.partition." $partitionId) "" }}
|
||||
{{if $rule }}
|
||||
[frontends."{{ $service.Name }}/{{ $partitionId }}"]
|
||||
backend = "{{ getBackendName $service $partition }}"
|
||||
|
||||
[frontends."{{ $service.Name }}/{{ $partitionId }}".routes.default]
|
||||
rule = "{{ $rule }}"
|
||||
{{end}}
|
||||
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
{{end}}
|
||||
|
||||
152
vendor/github.com/gorilla/websocket/client.go
generated
vendored
@@ -5,10 +5,8 @@
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -88,50 +86,6 @@ type Dialer struct {
|
||||
|
||||
var errMalformedURL = errors.New("malformed ws or wss URL")
|
||||
|
||||
// parseURL parses the URL.
|
||||
//
|
||||
// This function is a replacement for the standard library url.Parse function.
|
||||
// In Go 1.4 and earlier, url.Parse loses information from the path.
|
||||
func parseURL(s string) (*url.URL, error) {
|
||||
// From the RFC:
|
||||
//
|
||||
// ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ]
|
||||
// wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ]
|
||||
var u url.URL
|
||||
switch {
|
||||
case strings.HasPrefix(s, "ws://"):
|
||||
u.Scheme = "ws"
|
||||
s = s[len("ws://"):]
|
||||
case strings.HasPrefix(s, "wss://"):
|
||||
u.Scheme = "wss"
|
||||
s = s[len("wss://"):]
|
||||
default:
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
if i := strings.Index(s, "?"); i >= 0 {
|
||||
u.RawQuery = s[i+1:]
|
||||
s = s[:i]
|
||||
}
|
||||
|
||||
if i := strings.Index(s, "/"); i >= 0 {
|
||||
u.Opaque = s[i:]
|
||||
s = s[:i]
|
||||
} else {
|
||||
u.Opaque = "/"
|
||||
}
|
||||
|
||||
u.Host = s
|
||||
|
||||
if strings.Contains(u.Host, "@") {
|
||||
// Don't bother parsing user information because user information is
|
||||
// not allowed in websocket URIs.
|
||||
return nil, errMalformedURL
|
||||
}
|
||||
|
||||
return &u, nil
|
||||
}
|
||||
|
||||
func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
hostPort = u.Host
|
||||
hostNoPort = u.Host
|
||||
@@ -150,11 +104,15 @@ func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) {
|
||||
return hostPort, hostNoPort
|
||||
}
|
||||
|
||||
// DefaultDialer is a dialer with all fields set to the default zero values.
|
||||
// DefaultDialer is a dialer with all fields set to the default values.
|
||||
var DefaultDialer = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
HandshakeTimeout: 45 * time.Second,
|
||||
}
|
||||
|
||||
// nilDialer is dialer to use when receiver is nil.
|
||||
var nilDialer Dialer = *DefaultDialer
|
||||
|
||||
// Dial creates a new client connection. Use requestHeader to specify the
|
||||
// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie).
|
||||
// Use the response.Header to get the selected subprotocol
|
||||
@@ -167,9 +125,7 @@ var DefaultDialer = &Dialer{
|
||||
func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) {
|
||||
|
||||
if d == nil {
|
||||
d = &Dialer{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
}
|
||||
d = &nilDialer
|
||||
}
|
||||
|
||||
challengeKey, err := generateChallengeKey()
|
||||
@@ -177,7 +133,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
u, err := parseURL(urlStr)
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -237,6 +193,8 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
|
||||
k == "Sec-Websocket-Extensions" ||
|
||||
(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
|
||||
return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
|
||||
case k == "Sec-Websocket-Protocol":
|
||||
req.Header["Sec-WebSocket-Protocol"] = vs
|
||||
default:
|
||||
req.Header[k] = vs
|
||||
}
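
The added `case k == "Sec-Websocket-Protocol"` works around Go's header canonicalization: `http.Header.Set` rewrites the key as `Sec-Websocket-Protocol`, while RFC 6455 (and some servers) expect `Sec-WebSocket-Protocol`. Writing to the underlying map preserves the exact capitalization. A quick demonstration:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := make(http.Header)

	// Header.Set canonicalizes the key, lower-casing the "S" in "Socket".
	h.Set("Sec-WebSocket-Protocol", "chat")
	fmt.Println(h) // map[Sec-Websocket-Protocol:[chat]]

	// Assigning to the underlying map keeps the capitalization the
	// WebSocket RFC uses.
	delete(h, "Sec-Websocket-Protocol")
	h["Sec-WebSocket-Protocol"] = []string{"chat"}
	fmt.Println(h) // map[Sec-WebSocket-Protocol:[chat]]
}
```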
|
||||
@@ -246,36 +204,52 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
|
||||
req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
|
||||
var proxyURL *url.URL
|
||||
// Check wether the proxy method has been configured
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err = d.Proxy(req)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var targetHostPort string
|
||||
if proxyURL != nil {
|
||||
targetHostPort, _ = hostPortNoPort(proxyURL)
|
||||
} else {
|
||||
targetHostPort = hostPort
|
||||
}
|
||||
|
||||
var deadline time.Time
|
||||
if d.HandshakeTimeout != 0 {
|
||||
deadline = time.Now().Add(d.HandshakeTimeout)
|
||||
}
|
||||
|
||||
// Get network dial function.
|
||||
netDial := d.NetDial
|
||||
if netDial == nil {
|
||||
netDialer := &net.Dialer{Deadline: deadline}
|
||||
netDial = netDialer.Dial
|
||||
}
|
||||
|
||||
netConn, err := netDial("tcp", targetHostPort)
|
||||
// If needed, wrap the dial function to set the connection deadline.
|
||||
if !deadline.Equal(time.Time{}) {
|
||||
forwardDial := netDial
|
||||
netDial = func(network, addr string) (net.Conn, error) {
|
||||
c, err := forwardDial(network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = c.SetDeadline(deadline)
|
||||
if err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If needed, wrap the dial function to connect through a proxy.
|
||||
if d.Proxy != nil {
|
||||
proxyURL, err := d.Proxy(req)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if proxyURL != nil {
|
||||
dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
netDial = dialer.Dial
|
||||
}
|
||||
}
|
||||
|
||||
hostPort, hostNoPort := hostPortNoPort(u)
|
||||
netConn, err := netDial("tcp", hostPort)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -286,42 +260,6 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re
|
||||
}
|
||||
}()
|
||||
|
||||
if err := netConn.SetDeadline(deadline); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if proxyURL != nil {
|
||||
connectHeader := make(http.Header)
|
||||
if user := proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: hostPort},
|
||||
Host: hostPort,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
connectReq.Write(netConn)
|
||||
|
||||
// Read response.
|
||||
// Okay to use and discard buffered reader here, because
|
||||
// TLS server will not speak until spoken to.
|
||||
br := bufio.NewReader(netConn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if resp.StatusCode != 200 {
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, nil, errors.New(f[1])
|
||||
}
|
||||
}
|
||||
|
||||
if u.Scheme == "https" {
|
||||
cfg := cloneTLSConfig(d.TLSClientConfig)
|
||||
if cfg.ServerName == "" {
|
||||
|
||||
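The hunks above rework Dialer.Dial to resolve the proxy before dialing and to wrap the dial function with the handshake deadline. A minimal sketch of how a caller might exercise that path, assuming the vendored gorilla/websocket; the endpoint URL is a placeholder:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

func main() {
	d := websocket.Dialer{
		// Proxy is consulted before dialing; an http:// proxy URL is tunnelled
		// through a CONNECT request, a socks5:// URL through the bundled dialer.
		Proxy: http.ProxyFromEnvironment,
		// HandshakeTimeout is applied as a deadline on the underlying
		// connection for the whole handshake.
		HandshakeTimeout: 10 * time.Second,
	}
	// Placeholder endpoint.
	conn, _, err := d.Dial("ws://127.0.0.1:8080/ws", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()
	log.Println("connected")
}
```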
70
vendor/github.com/gorilla/websocket/conn.go
generated
vendored
@@ -76,7 +76,7 @@ const (
|
||||
// is UTF-8 encoded text.
|
||||
PingMessage = 9
|
||||
|
||||
// PongMessage denotes a ping control message. The optional message payload
|
||||
// PongMessage denotes a pong control message. The optional message payload
|
||||
// is UTF-8 encoded text.
|
||||
PongMessage = 10
|
||||
)
|
||||
@@ -100,9 +100,8 @@ func (e *netError) Error() string { return e.msg }
|
||||
func (e *netError) Temporary() bool { return e.temporary }
|
||||
func (e *netError) Timeout() bool { return e.timeout }
|
||||
|
||||
// CloseError represents close frame.
|
||||
// CloseError represents a close message.
|
||||
type CloseError struct {
|
||||
|
||||
// Code is defined in RFC 6455, section 11.7.
|
||||
Code int
|
||||
|
||||
@@ -343,7 +342,8 @@ func (c *Conn) Subprotocol() string {
|
||||
return c.subprotocol
|
||||
}
|
||||
|
||||
// Close closes the underlying network connection without sending or waiting for a close frame.
|
||||
// Close closes the underlying network connection without sending or waiting
|
||||
// for a close message.
|
||||
func (c *Conn) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
@@ -370,7 +370,7 @@ func (c *Conn) writeFatal(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
|
||||
func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error {
|
||||
<-c.mu
|
||||
defer func() { c.mu <- true }()
|
||||
|
||||
@@ -382,15 +382,14 @@ func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error {
|
||||
}
|
||||
|
||||
c.conn.SetWriteDeadline(deadline)
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
_, err := c.conn.Write(buf)
|
||||
if err != nil {
|
||||
return c.writeFatal(err)
|
||||
}
|
||||
}
|
||||
if len(buf1) == 0 {
|
||||
_, err = c.conn.Write(buf0)
|
||||
} else {
|
||||
err = c.writeBufs(buf0, buf1)
|
||||
}
|
||||
if err != nil {
|
||||
return c.writeFatal(err)
|
||||
}
|
||||
|
||||
if frameType == CloseMessage {
|
||||
c.writeFatal(ErrCloseSent)
|
||||
}
|
||||
@@ -484,6 +483,9 @@ func (c *Conn) prepWrite(messageType int) error {
|
||||
//
|
||||
// There can be at most one open writer on a connection. NextWriter closes the
|
||||
// previous writer if the application has not already done so.
|
||||
//
|
||||
// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and
|
||||
// PongMessage) are supported.
|
||||
func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) {
|
||||
if err := c.prepWrite(messageType); err != nil {
|
||||
return nil, err
|
||||
@@ -764,7 +766,6 @@ func (c *Conn) SetWriteDeadline(t time.Time) error {
|
||||
// Read methods
|
||||
|
||||
func (c *Conn) advanceFrame() (int, error) {
|
||||
|
||||
// 1. Skip remainder of previous frame.
|
||||
|
||||
if c.readRemaining > 0 {
|
||||
@@ -1033,7 +1034,7 @@ func (c *Conn) SetReadDeadline(t time.Time) error {
|
||||
}
|
||||
|
||||
// SetReadLimit sets the maximum size for a message read from the peer. If a
|
||||
// message exceeds the limit, the connection sends a close frame to the peer
|
||||
// message exceeds the limit, the connection sends a close message to the peer
|
||||
// and returns ErrReadLimit to the application.
|
||||
func (c *Conn) SetReadLimit(limit int64) {
|
||||
c.readLimit = limit
|
||||
@@ -1046,24 +1047,22 @@ func (c *Conn) CloseHandler() func(code int, text string) error {
|
||||
|
||||
// SetCloseHandler sets the handler for close messages received from the peer.
|
||||
// The code argument to h is the received close code or CloseNoStatusReceived
|
||||
// if the close message is empty. The default close handler sends a close frame
|
||||
// back to the peer.
|
||||
// if the close message is empty. The default close handler sends a close
|
||||
// message back to the peer.
|
||||
//
|
||||
// The application must read the connection to process close messages as
|
||||
// described in the section on Control Frames above.
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// close messages as described in the section on Control Messages above.
|
||||
//
|
||||
// The connection read methods return a CloseError when a close frame is
|
||||
// The connection read methods return a CloseError when a close message is
|
||||
// received. Most applications should handle close messages as part of their
|
||||
// normal error handling. Applications should only set a close handler when the
|
||||
// application must perform some action before sending a close frame back to
|
||||
// application must perform some action before sending a close message back to
|
||||
// the peer.
|
||||
func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
|
||||
if h == nil {
|
||||
h = func(code int, text string) error {
|
||||
message := []byte{}
|
||||
if code != CloseNoStatusReceived {
|
||||
message = FormatCloseMessage(code, "")
|
||||
}
|
||||
message := FormatCloseMessage(code, "")
|
||||
c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
|
||||
return nil
|
||||
}
|
||||
@@ -1077,11 +1076,12 @@ func (c *Conn) PingHandler() func(appData string) error {
|
||||
}
|
||||
|
||||
// SetPingHandler sets the handler for ping messages received from the peer.
|
||||
// The appData argument to h is the PING frame application data. The default
|
||||
// The appData argument to h is the PING message application data. The default
|
||||
// ping handler sends a pong to the peer.
|
||||
//
|
||||
// The application must read the connection to process ping messages as
|
||||
// described in the section on Control Frames above.
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// ping messages as described in the section on Control Messages above.
|
||||
func (c *Conn) SetPingHandler(h func(appData string) error) {
|
||||
if h == nil {
|
||||
h = func(message string) error {
|
||||
@@ -1103,11 +1103,12 @@ func (c *Conn) PongHandler() func(appData string) error {
|
||||
}
|
||||
|
||||
// SetPongHandler sets the handler for pong messages received from the peer.
|
||||
// The appData argument to h is the PONG frame application data. The default
|
||||
// The appData argument to h is the PONG message application data. The default
|
||||
// pong handler does nothing.
|
||||
//
|
||||
// The application must read the connection to process ping messages as
|
||||
// described in the section on Control Frames above.
|
||||
// The handler function is called from the NextReader, ReadMessage and message
|
||||
// reader Read methods. The application must read the connection to process
|
||||
// pong messages as described in the section on Control Messages above.
|
||||
func (c *Conn) SetPongHandler(h func(appData string) error) {
|
||||
if h == nil {
|
||||
h = func(string) error { return nil }
|
||||
@@ -1141,7 +1142,14 @@ func (c *Conn) SetCompressionLevel(level int) error {
|
||||
}
|
||||
|
||||
// FormatCloseMessage formats closeCode and text as a WebSocket close message.
|
||||
// An empty message is returned for code CloseNoStatusReceived.
|
||||
func FormatCloseMessage(closeCode int, text string) []byte {
|
||||
if closeCode == CloseNoStatusReceived {
|
||||
// Return empty message because it's illegal to send
|
||||
// CloseNoStatusReceived. Return non-nil value in case application
|
||||
// checks for nil.
|
||||
return []byte{}
|
||||
}
|
||||
buf := make([]byte, 2+len(text))
|
||||
binary.BigEndian.PutUint16(buf, uint16(closeCode))
|
||||
copy(buf[2:], text)
|
||||
|
||||
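The handler documentation above is easier to follow with a concrete call site. A small sketch, assuming an already-established *websocket.Conn; the logging is illustrative only:

```go
package wsutil

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

// logCloses mirrors the default close handler but adds a log line. It is a
// sketch: conn must already be an established connection.
func logCloses(conn *websocket.Conn) {
	conn.SetCloseHandler(func(code int, text string) error {
		log.Printf("peer sent close: code=%d text=%q", code, text)
		// FormatCloseMessage now returns an empty payload for
		// CloseNoStatusReceived, which is still legal to send back.
		msg := websocket.FormatCloseMessage(code, "")
		return conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
	})
}
```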
15
vendor/github.com/gorilla/websocket/conn_write.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
import "net"
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
b := net.Buffers(bufs)
|
||||
_, err := b.WriteTo(c.conn)
|
||||
return err
|
||||
}
|
||||
18
vendor/github.com/gorilla/websocket/conn_write_legacy.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package websocket
|
||||
|
||||
func (c *Conn) writeBufs(bufs ...[]byte) error {
|
||||
for _, buf := range bufs {
|
||||
if len(buf) > 0 {
|
||||
if _, err := c.conn.Write(buf); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
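The two new files split the write path on a go1.8 build tag: newer toolchains get net.Buffers (a vectored write), older ones keep the per-slice loop. A standalone sketch of the net.Buffers side:

```go
package main

import (
	"net"
	"os"
)

func main() {
	// net.Buffers gathers several slices into one vectored write where the
	// platform supports it, which is what the go1.8 writeBufs variant relies on.
	bufs := net.Buffers{[]byte("frame header "), []byte("frame payload\n")}
	if _, err := bufs.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}
```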
43
vendor/github.com/gorilla/websocket/doc.go
generated
vendored
@@ -30,10 +30,12 @@
|
||||
// for {
|
||||
// messageType, p, err := conn.ReadMessage()
|
||||
// if err != nil {
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// if err := conn.WriteMessage(messageType, p); err != nil {
|
||||
// return err
|
||||
// log.Println(err)
|
||||
// return
|
||||
// }
|
||||
// }
|
||||
//
|
||||
@@ -84,20 +86,26 @@
|
||||
// and pong. Call the connection WriteControl, WriteMessage or NextWriter
|
||||
// methods to send a control message to the peer.
|
||||
//
|
||||
// Connections handle received close messages by sending a close message to the
|
||||
// peer and returning a *CloseError from the the NextReader, ReadMessage or the
|
||||
// message Read method.
|
||||
// Connections handle received close messages by calling the handler function
|
||||
// set with the SetCloseHandler method and by returning a *CloseError from the
|
||||
// NextReader, ReadMessage or the message Read method. The default close
|
||||
// handler sends a close message to the peer.
|
||||
//
|
||||
// Connections handle received ping and pong messages by invoking callback
|
||||
// functions set with SetPingHandler and SetPongHandler methods. The callback
|
||||
// functions are called from the NextReader, ReadMessage and the message Read
|
||||
// methods.
|
||||
// Connections handle received ping messages by calling the handler function
|
||||
// set with the SetPingHandler method. The default ping handler sends a pong
|
||||
// message to the peer.
|
||||
//
|
||||
// The default ping handler sends a pong to the peer. The application's reading
|
||||
// goroutine can block for a short time while the handler writes the pong data
|
||||
// to the connection.
|
||||
// Connections handle received pong messages by calling the handler function
|
||||
// set with the SetPongHandler method. The default pong handler does nothing.
|
||||
// If an application sends ping messages, then the application should set a
|
||||
// pong handler to receive the corresponding pong.
|
||||
//
|
||||
// The application must read the connection to process ping, pong and close
|
||||
// The control message handler functions are called from the NextReader,
|
||||
// ReadMessage and message reader Read methods. The default close and ping
|
||||
// handlers can block these methods for a short time when the handler writes to
|
||||
// the connection.
|
||||
//
|
||||
// The application must read the connection to process close, ping and pong
|
||||
// messages sent from the peer. If the application is not otherwise interested
|
||||
// in messages from the peer, then the application should start a goroutine to
|
||||
// read and discard messages from the peer. A simple example is:
|
||||
@@ -136,15 +144,8 @@
|
||||
// method fails the WebSocket handshake with HTTP status 403.
|
||||
//
|
||||
// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
|
||||
// the handshake if the Origin request header is present and not equal to the
|
||||
// Host request header.
|
||||
//
|
||||
// An application can allow connections from any origin by specifying a
|
||||
// function that always returns true:
|
||||
//
|
||||
// var upgrader = websocket.Upgrader{
|
||||
// CheckOrigin: func(r *http.Request) bool { return true },
|
||||
// }
|
||||
// the handshake if the Origin request header is present and the Origin host is
|
||||
// not equal to the Host request header.
|
||||
//
|
||||
// The deprecated package-level Upgrade function does not perform origin
|
||||
// checking. The application is responsible for checking the Origin header
|
||||
|
||||
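doc.go now stresses that the application must keep reading so close, ping and pong handlers run. A minimal echo server in that spirit, assuming the vendored gorilla/websocket; the listen address is a placeholder:

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/websocket"
)

// A zero-value Upgrader keeps the safe default: the handshake is refused when
// the Origin host differs from the Host header.
var upgrader = websocket.Upgrader{}

func echo(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println("upgrade:", err)
		return
	}
	defer conn.Close()
	// The read loop also drives the close/ping/pong handlers.
	for {
		mt, msg, err := conn.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		if err := conn.WriteMessage(mt, msg); err != nil {
			log.Println("write:", err)
			return
		}
	}
}

func main() {
	http.HandleFunc("/echo", echo)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```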
1
vendor/github.com/gorilla/websocket/mask.go
generated
vendored
@@ -11,7 +11,6 @@ import "unsafe"
|
||||
const wordSize = int(unsafe.Sizeof(uintptr(0)))
|
||||
|
||||
func maskBytes(key [4]byte, pos int, b []byte) int {
|
||||
|
||||
// Mask one byte at a time for small buffers.
|
||||
if len(b) < 2*wordSize {
|
||||
for i := range b {
|
||||
|
||||
77
vendor/github.com/gorilla/websocket/proxy.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type netDialerFunc func(netowrk, addr string) (net.Conn, error)
|
||||
|
||||
func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
|
||||
return fn(network, addr)
|
||||
}
|
||||
|
||||
func init() {
|
||||
proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) {
|
||||
return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil
|
||||
})
|
||||
}
|
||||
|
||||
type httpProxyDialer struct {
|
||||
proxyURL *url.URL
|
||||
fowardDial func(network, addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
|
||||
hostPort, _ := hostPortNoPort(hpd.proxyURL)
|
||||
conn, err := hpd.fowardDial(network, hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
connectHeader := make(http.Header)
|
||||
if user := hpd.proxyURL.User; user != nil {
|
||||
proxyUser := user.Username()
|
||||
if proxyPassword, passwordSet := user.Password(); passwordSet {
|
||||
credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
|
||||
connectHeader.Set("Proxy-Authorization", "Basic "+credential)
|
||||
}
|
||||
}
|
||||
|
||||
connectReq := &http.Request{
|
||||
Method: "CONNECT",
|
||||
URL: &url.URL{Opaque: addr},
|
||||
Host: addr,
|
||||
Header: connectHeader,
|
||||
}
|
||||
|
||||
if err := connectReq.Write(conn); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read response. It's OK to use and discard buffered reader here because
|
||||
// the remote server does not speak until spoken to.
|
||||
br := bufio.NewReader(conn)
|
||||
resp, err := http.ReadResponse(br, connectReq)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
conn.Close()
|
||||
f := strings.SplitN(resp.Status, " ", 2)
|
||||
return nil, errors.New(f[1])
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
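proxy.go registers an "http" dialer that tunnels through a CONNECT request, adding Proxy-Authorization when the proxy URL carries credentials. A sketch of pointing the dialer at a fixed proxy; both addresses and the credentials are placeholders:

```go
package main

import (
	"log"
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

func main() {
	proxyURL, err := url.Parse("http://user:secret@127.0.0.1:3128")
	if err != nil {
		log.Fatal(err)
	}
	// http.ProxyURL returns a Proxy function that always yields this URL, so
	// the handshake is sent through the CONNECT tunnel described above.
	d := websocket.Dialer{Proxy: http.ProxyURL(proxyURL)}
	conn, _, err := d.Dial("ws://127.0.0.1:9000/ws", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	conn.Close()
}
```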
34
vendor/github.com/gorilla/websocket/server.go
generated
vendored
@@ -44,8 +44,12 @@ type Upgrader struct {
|
||||
Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
|
||||
|
||||
// CheckOrigin returns true if the request Origin header is acceptable. If
|
||||
// CheckOrigin is nil, the host in the Origin header must not be set or
|
||||
// must match the host of the request.
|
||||
// CheckOrigin is nil, then a safe default is used: return false if the
|
||||
// Origin request header is present and the origin host is not equal to
|
||||
// request Host header.
|
||||
//
|
||||
// A CheckOrigin function should carefully validate the request origin to
|
||||
// prevent cross-site request forgery.
|
||||
CheckOrigin func(r *http.Request) bool
|
||||
|
||||
// EnableCompression specify if the server should attempt to negotiate per
|
||||
@@ -76,7 +80,7 @@ func checkSameOrigin(r *http.Request) bool {
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return u.Host == r.Host
|
||||
return equalASCIIFold(u.Host, r.Host)
|
||||
}
|
||||
|
||||
func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string {
|
||||
@@ -104,32 +108,34 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header
|
||||
// If the upgrade fails, then Upgrade replies to the client with an HTTP error
|
||||
// response.
|
||||
func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) {
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
|
||||
}
|
||||
const badHandshake = "websocket: the client is not using the websocket protocol: "
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Connection", "upgrade") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header")
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Upgrade", "websocket") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header")
|
||||
return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header")
|
||||
}
|
||||
|
||||
if r.Method != "GET" {
|
||||
return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET")
|
||||
}
|
||||
|
||||
if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") {
|
||||
return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header")
|
||||
}
|
||||
|
||||
if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok {
|
||||
return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported")
|
||||
}
|
||||
|
||||
checkOrigin := u.CheckOrigin
|
||||
if checkOrigin == nil {
|
||||
checkOrigin = checkSameOrigin
|
||||
}
|
||||
if !checkOrigin(r) {
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed")
|
||||
return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin")
|
||||
}
|
||||
|
||||
challengeKey := r.Header.Get("Sec-Websocket-Key")
|
||||
@@ -237,7 +243,7 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade
|
||||
// of the same origin policy check is:
|
||||
//
|
||||
// if req.Header.Get("Origin") != "http://"+req.Host {
|
||||
// http.Error(w, "Origin not allowed", 403)
|
||||
// http.Error(w, "Origin not allowed", http.StatusForbidden)
|
||||
// return
|
||||
// }
|
||||
//
|
||||
|
||||
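Upgrade now rejects cross-origin handshakes with a clearer message; applications that genuinely serve several origins can supply their own check. A sketch with a hypothetical allowlist (package and host names are illustrative):

```go
// Package wsorigin sketches a custom origin check for the Upgrader.
package wsorigin

import (
	"net/http"
	"net/url"

	"github.com/gorilla/websocket"
)

// allowedOrigins is a hypothetical allowlist.
var allowedOrigins = map[string]bool{
	"app.example.com":   true,
	"admin.example.com": true,
}

// Upgrader refuses the handshake unless the Origin host is on the allowlist;
// a missing Origin header is accepted, matching the default behaviour.
var Upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		origin := r.Header.Get("Origin")
		if origin == "" {
			return true
		}
		u, err := url.Parse(origin)
		if err != nil {
			return false
		}
		return allowedOrigins[u.Hostname()]
	},
}
```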
29
vendor/github.com/gorilla/websocket/util.go
generated
vendored
@@ -11,6 +11,7 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
|
||||
@@ -127,8 +128,31 @@ func nextTokenOrQuoted(s string) (value string, rest string) {
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// equalASCIIFold returns true if s is equal to t with ASCII case folding.
|
||||
func equalASCIIFold(s, t string) bool {
|
||||
for s != "" && t != "" {
|
||||
sr, size := utf8.DecodeRuneInString(s)
|
||||
s = s[size:]
|
||||
tr, size := utf8.DecodeRuneInString(t)
|
||||
t = t[size:]
|
||||
if sr == tr {
|
||||
continue
|
||||
}
|
||||
if 'A' <= sr && sr <= 'Z' {
|
||||
sr = sr + 'a' - 'A'
|
||||
}
|
||||
if 'A' <= tr && tr <= 'Z' {
|
||||
tr = tr + 'a' - 'A'
|
||||
}
|
||||
if sr != tr {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return s == t
|
||||
}
|
||||
|
||||
// tokenListContainsValue returns true if the 1#token header with the given
|
||||
// name contains token.
|
||||
// name contains a token equal to value with ASCII case folding.
|
||||
func tokenListContainsValue(header http.Header, name string, value string) bool {
|
||||
headers:
|
||||
for _, s := range header[name] {
|
||||
@@ -142,7 +166,7 @@ headers:
|
||||
if s != "" && s[0] != ',' {
|
||||
continue headers
|
||||
}
|
||||
if strings.EqualFold(t, value) {
|
||||
if equalASCIIFold(t, value) {
|
||||
return true
|
||||
}
|
||||
if s == "" {
|
||||
@@ -156,7 +180,6 @@ headers:
|
||||
|
||||
// parseExtensions parses WebSocket extensions from a header.
|
||||
func parseExtensions(header http.Header) []map[string]string {
|
||||
|
||||
// From RFC 6455:
|
||||
//
|
||||
// Sec-WebSocket-Extensions = extension-list
|
||||
|
||||
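equalASCIIFold replaces strings.EqualFold in the token comparison because Unicode case folding is wider than ASCII folding; the Kelvin sign, for instance, folds to 'k'. A standalone reproduction of the helper (copied from the hunk above) to show the difference:

```go
package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// equalASCIIFold mirrors the vendored helper: only ASCII letters are folded,
// everything else is compared rune for rune.
func equalASCIIFold(s, t string) bool {
	for s != "" && t != "" {
		sr, size := utf8.DecodeRuneInString(s)
		s = s[size:]
		tr, size := utf8.DecodeRuneInString(t)
		t = t[size:]
		if sr == tr {
			continue
		}
		if 'A' <= sr && sr <= 'Z' {
			sr = sr + 'a' - 'A'
		}
		if 'A' <= tr && tr <= 'Z' {
			tr = tr + 'a' - 'A'
		}
		if sr != tr {
			return false
		}
	}
	return s == t
}

func main() {
	const kelvin = "\u212A" // KELVIN SIGN, renders like "K"
	a := "websocket"
	b := "websoc" + kelvin + "et"
	fmt.Println(strings.EqualFold(a, b)) // true: Unicode folding maps U+212A to 'k'
	fmt.Println(equalASCIIFold(a, b))    // false: only ASCII letters are folded
}
```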
473
vendor/github.com/gorilla/websocket/x_net_proxy.go
generated
vendored
Normal file
@@ -0,0 +1,473 @@
|
||||
// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT.
|
||||
//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy
|
||||
|
||||
// Package proxy provides support for a variety of protocols to proxy network
|
||||
// data.
|
||||
//
|
||||
|
||||
package websocket
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type proxy_direct struct{}
|
||||
|
||||
// Direct is a direct proxy: one that makes network connections directly.
|
||||
var proxy_Direct = proxy_direct{}
|
||||
|
||||
func (proxy_direct) Dial(network, addr string) (net.Conn, error) {
|
||||
return net.Dial(network, addr)
|
||||
}
|
||||
|
||||
// A PerHost directs connections to a default Dialer unless the host name
|
||||
// requested matches one of a number of exceptions.
|
||||
type proxy_PerHost struct {
|
||||
def, bypass proxy_Dialer
|
||||
|
||||
bypassNetworks []*net.IPNet
|
||||
bypassIPs []net.IP
|
||||
bypassZones []string
|
||||
bypassHosts []string
|
||||
}
|
||||
|
||||
// NewPerHost returns a PerHost Dialer that directs connections to either
|
||||
// defaultDialer or bypass, depending on whether the connection matches one of
|
||||
// the configured rules.
|
||||
func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost {
|
||||
return &proxy_PerHost{
|
||||
def: defaultDialer,
|
||||
bypass: bypass,
|
||||
}
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network through either
|
||||
// defaultDialer or bypass.
|
||||
func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) {
|
||||
host, _, err := net.SplitHostPort(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p.dialerForRequest(host).Dial(network, addr)
|
||||
}
|
||||
|
||||
func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer {
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
for _, net := range p.bypassNetworks {
|
||||
if net.Contains(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassIP := range p.bypassIPs {
|
||||
if bypassIP.Equal(ip) {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
for _, zone := range p.bypassZones {
|
||||
if strings.HasSuffix(host, zone) {
|
||||
return p.bypass
|
||||
}
|
||||
if host == zone[1:] {
|
||||
// For a zone ".example.com", we match "example.com"
|
||||
// too.
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
for _, bypassHost := range p.bypassHosts {
|
||||
if bypassHost == host {
|
||||
return p.bypass
|
||||
}
|
||||
}
|
||||
return p.def
|
||||
}
|
||||
|
||||
// AddFromString parses a string that contains comma-separated values
|
||||
// specifying hosts that should use the bypass proxy. Each value is either an
|
||||
// IP address, a CIDR range, a zone (*.example.com) or a host name
|
||||
// (localhost). A best effort is made to parse the string and errors are
|
||||
// ignored.
|
||||
func (p *proxy_PerHost) AddFromString(s string) {
|
||||
hosts := strings.Split(s, ",")
|
||||
for _, host := range hosts {
|
||||
host = strings.TrimSpace(host)
|
||||
if len(host) == 0 {
|
||||
continue
|
||||
}
|
||||
if strings.Contains(host, "/") {
|
||||
// We assume that it's a CIDR address like 127.0.0.0/8
|
||||
if _, net, err := net.ParseCIDR(host); err == nil {
|
||||
p.AddNetwork(net)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
p.AddIP(ip)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(host, "*.") {
|
||||
p.AddZone(host[1:])
|
||||
continue
|
||||
}
|
||||
p.AddHost(host)
|
||||
}
|
||||
}
|
||||
|
||||
// AddIP specifies an IP address that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match an IP.
|
||||
func (p *proxy_PerHost) AddIP(ip net.IP) {
|
||||
p.bypassIPs = append(p.bypassIPs, ip)
|
||||
}
|
||||
|
||||
// AddNetwork specifies an IP range that will use the bypass proxy. Note that
|
||||
// this will only take effect if a literal IP address is dialed. A connection
|
||||
// to a named host will never match.
|
||||
func (p *proxy_PerHost) AddNetwork(net *net.IPNet) {
|
||||
p.bypassNetworks = append(p.bypassNetworks, net)
|
||||
}
|
||||
|
||||
// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of
|
||||
// "example.com" matches "example.com" and all of its subdomains.
|
||||
func (p *proxy_PerHost) AddZone(zone string) {
|
||||
if strings.HasSuffix(zone, ".") {
|
||||
zone = zone[:len(zone)-1]
|
||||
}
|
||||
if !strings.HasPrefix(zone, ".") {
|
||||
zone = "." + zone
|
||||
}
|
||||
p.bypassZones = append(p.bypassZones, zone)
|
||||
}
|
||||
|
||||
// AddHost specifies a host name that will use the bypass proxy.
|
||||
func (p *proxy_PerHost) AddHost(host string) {
|
||||
if strings.HasSuffix(host, ".") {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
p.bypassHosts = append(p.bypassHosts, host)
|
||||
}
|
||||
|
||||
// A Dialer is a means to establish a connection.
|
||||
type proxy_Dialer interface {
|
||||
// Dial connects to the given address via the proxy.
|
||||
Dial(network, addr string) (c net.Conn, err error)
|
||||
}
|
||||
|
||||
// Auth contains authentication parameters that specific Dialers may require.
|
||||
type proxy_Auth struct {
|
||||
User, Password string
|
||||
}
|
||||
|
||||
// FromEnvironment returns the dialer specified by the proxy related variables in
|
||||
// the environment.
|
||||
func proxy_FromEnvironment() proxy_Dialer {
|
||||
allProxy := proxy_allProxyEnv.Get()
|
||||
if len(allProxy) == 0 {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
proxyURL, err := url.Parse(allProxy)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
proxy, err := proxy_FromURL(proxyURL, proxy_Direct)
|
||||
if err != nil {
|
||||
return proxy_Direct
|
||||
}
|
||||
|
||||
noProxy := proxy_noProxyEnv.Get()
|
||||
if len(noProxy) == 0 {
|
||||
return proxy
|
||||
}
|
||||
|
||||
perHost := proxy_NewPerHost(proxy, proxy_Direct)
|
||||
perHost.AddFromString(noProxy)
|
||||
return perHost
|
||||
}
|
||||
|
||||
// proxySchemes is a map from URL schemes to a function that creates a Dialer
|
||||
// from a URL with such a scheme.
|
||||
var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)
|
||||
|
||||
// RegisterDialerType takes a URL scheme and a function to generate Dialers from
|
||||
// a URL with that scheme and a forwarding Dialer. Registered schemes are used
|
||||
// by FromURL.
|
||||
func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) {
|
||||
if proxy_proxySchemes == nil {
|
||||
proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error))
|
||||
}
|
||||
proxy_proxySchemes[scheme] = f
|
||||
}
|
||||
|
||||
// FromURL returns a Dialer given a URL specification and an underlying
|
||||
// Dialer for it to make network requests.
|
||||
func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
var auth *proxy_Auth
|
||||
if u.User != nil {
|
||||
auth = new(proxy_Auth)
|
||||
auth.User = u.User.Username()
|
||||
if p, ok := u.User.Password(); ok {
|
||||
auth.Password = p
|
||||
}
|
||||
}
|
||||
|
||||
switch u.Scheme {
|
||||
case "socks5":
|
||||
return proxy_SOCKS5("tcp", u.Host, auth, forward)
|
||||
}
|
||||
|
||||
// If the scheme doesn't match any of the built-in schemes, see if it
|
||||
// was registered by another package.
|
||||
if proxy_proxySchemes != nil {
|
||||
if f, ok := proxy_proxySchemes[u.Scheme]; ok {
|
||||
return f(u, forward)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("proxy: unknown scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var (
|
||||
proxy_allProxyEnv = &proxy_envOnce{
|
||||
names: []string{"ALL_PROXY", "all_proxy"},
|
||||
}
|
||||
proxy_noProxyEnv = &proxy_envOnce{
|
||||
names: []string{"NO_PROXY", "no_proxy"},
|
||||
}
|
||||
)
|
||||
|
||||
// envOnce looks up an environment variable (optionally by multiple
|
||||
// names) once. It mitigates expensive lookups on some platforms
|
||||
// (e.g. Windows).
|
||||
// (Borrowed from net/http/transport.go)
|
||||
type proxy_envOnce struct {
|
||||
names []string
|
||||
once sync.Once
|
||||
val string
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) Get() string {
|
||||
e.once.Do(e.init)
|
||||
return e.val
|
||||
}
|
||||
|
||||
func (e *proxy_envOnce) init() {
|
||||
for _, n := range e.names {
|
||||
e.val = os.Getenv(n)
|
||||
if e.val != "" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address
|
||||
// with an optional username and password. See RFC 1928 and RFC 1929.
|
||||
func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) {
|
||||
s := &proxy_socks5{
|
||||
network: network,
|
||||
addr: addr,
|
||||
forward: forward,
|
||||
}
|
||||
if auth != nil {
|
||||
s.user = auth.User
|
||||
s.password = auth.Password
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
type proxy_socks5 struct {
|
||||
user, password string
|
||||
network, addr string
|
||||
forward proxy_Dialer
|
||||
}
|
||||
|
||||
const proxy_socks5Version = 5
|
||||
|
||||
const (
|
||||
proxy_socks5AuthNone = 0
|
||||
proxy_socks5AuthPassword = 2
|
||||
)
|
||||
|
||||
const proxy_socks5Connect = 1
|
||||
|
||||
const (
|
||||
proxy_socks5IP4 = 1
|
||||
proxy_socks5Domain = 3
|
||||
proxy_socks5IP6 = 4
|
||||
)
|
||||
|
||||
var proxy_socks5Errors = []string{
|
||||
"",
|
||||
"general failure",
|
||||
"connection forbidden",
|
||||
"network unreachable",
|
||||
"host unreachable",
|
||||
"connection refused",
|
||||
"TTL expired",
|
||||
"command not supported",
|
||||
"address type not supported",
|
||||
}
|
||||
|
||||
// Dial connects to the address addr on the given network via the SOCKS5 proxy.
|
||||
func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) {
|
||||
switch network {
|
||||
case "tcp", "tcp6", "tcp4":
|
||||
default:
|
||||
return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network)
|
||||
}
|
||||
|
||||
conn, err := s.forward.Dial(s.network, s.addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.connect(conn, addr); err != nil {
|
||||
conn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// connect takes an existing connection to a socks5 proxy server,
|
||||
// and commands the server to extend that connection to target,
|
||||
// which must be a canonical address with a host and port.
|
||||
func (s *proxy_socks5) connect(conn net.Conn, target string) error {
|
||||
host, portStr, err := net.SplitHostPort(target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portStr)
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to parse port number: " + portStr)
|
||||
}
|
||||
if port < 1 || port > 0xffff {
|
||||
return errors.New("proxy: port number out of range: " + portStr)
|
||||
}
|
||||
|
||||
// the size here is just an estimate
|
||||
buf := make([]byte, 0, 6+len(host))
|
||||
|
||||
buf = append(buf, proxy_socks5Version)
|
||||
if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 {
|
||||
buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword)
|
||||
} else {
|
||||
buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone)
|
||||
}
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
if buf[0] != 5 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0])))
|
||||
}
|
||||
if buf[1] == 0xff {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication")
|
||||
}
|
||||
|
||||
// See RFC 1929
|
||||
if buf[1] == proxy_socks5AuthPassword {
|
||||
buf = buf[:0]
|
||||
buf = append(buf, 1 /* password protocol version */)
|
||||
buf = append(buf, uint8(len(s.user)))
|
||||
buf = append(buf, s.user...)
|
||||
buf = append(buf, uint8(len(s.password)))
|
||||
buf = append(buf, s.password...)
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if buf[1] != 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password")
|
||||
}
|
||||
}
|
||||
|
||||
buf = buf[:0]
|
||||
buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */)
|
||||
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
buf = append(buf, proxy_socks5IP4)
|
||||
ip = ip4
|
||||
} else {
|
||||
buf = append(buf, proxy_socks5IP6)
|
||||
}
|
||||
buf = append(buf, ip...)
|
||||
} else {
|
||||
if len(host) > 255 {
|
||||
return errors.New("proxy: destination host name too long: " + host)
|
||||
}
|
||||
buf = append(buf, proxy_socks5Domain)
|
||||
buf = append(buf, byte(len(host)))
|
||||
buf = append(buf, host...)
|
||||
}
|
||||
buf = append(buf, byte(port>>8), byte(port))
|
||||
|
||||
if _, err := conn.Write(buf); err != nil {
|
||||
return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
if _, err := io.ReadFull(conn, buf[:4]); err != nil {
|
||||
return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
failure := "unknown error"
|
||||
if int(buf[1]) < len(proxy_socks5Errors) {
|
||||
failure = proxy_socks5Errors[buf[1]]
|
||||
}
|
||||
|
||||
if len(failure) > 0 {
|
||||
return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure)
|
||||
}
|
||||
|
||||
bytesToDiscard := 0
|
||||
switch buf[3] {
|
||||
case proxy_socks5IP4:
|
||||
bytesToDiscard = net.IPv4len
|
||||
case proxy_socks5IP6:
|
||||
bytesToDiscard = net.IPv6len
|
||||
case proxy_socks5Domain:
|
||||
_, err := io.ReadFull(conn, buf[:1])
|
||||
if err != nil {
|
||||
return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
bytesToDiscard = int(buf[0])
|
||||
default:
|
||||
return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr)
|
||||
}
|
||||
|
||||
if cap(buf) < bytesToDiscard {
|
||||
buf = make([]byte, bytesToDiscard)
|
||||
} else {
|
||||
buf = buf[:bytesToDiscard]
|
||||
}
|
||||
if _, err := io.ReadFull(conn, buf); err != nil {
|
||||
return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
// Also need to discard the port number
|
||||
if _, err := io.ReadFull(conn, buf[:2]); err != nil {
|
||||
return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
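x_net_proxy.go is a bundled copy of golang.org/x/net/proxy, so its behaviour can be previewed with the upstream package. A sketch that dials through a SOCKS5 proxy; both addresses are placeholders:

```go
package main

import (
	"log"

	"golang.org/x/net/proxy"
)

func main() {
	// The same selection can also come from ALL_PROXY / NO_PROXY via
	// FromEnvironment; here the proxy is given explicitly.
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := dialer.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	log.Println("connected via SOCKS5 to", conn.RemoteAddr())
}
```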
307
vendor/github.com/miekg/dns/client.go
generated
vendored
@@ -4,10 +4,12 @@ package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -18,7 +20,7 @@ const tcpIdleTimeout time.Duration = 8 * time.Second
|
||||
type Conn struct {
|
||||
net.Conn // a net.Conn holding the connection
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
rtt time.Duration
|
||||
t time.Time
|
||||
tsigRequestMAC string
|
||||
@@ -26,14 +28,18 @@ type Conn struct {
|
||||
|
||||
// A Client defines parameters for a DNS client.
|
||||
type Client struct {
|
||||
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TLSConfig *tls.Config // TLS connection configuration
|
||||
Timeout time.Duration // a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout and WriteTimeout when non-zero
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP)
|
||||
UDPSize uint16 // minimum receive buffer for UDP messages
|
||||
TLSConfig *tls.Config // TLS connection configuration
|
||||
Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more
|
||||
// Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout,
|
||||
// WriteTimeout when non-zero. Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and
|
||||
// Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext)
|
||||
Timeout time.Duration
|
||||
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero
|
||||
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
|
||||
TsigSecret map[string]string // secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
|
||||
SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass
|
||||
group singleflight
|
||||
}
|
||||
@@ -43,93 +49,11 @@ type Client struct {
|
||||
// will it fall back to TCP in case of truncation.
|
||||
// See client.Exchange for more information on setting larger buffer sizes.
|
||||
func Exchange(m *Msg, a string) (r *Msg, err error) {
|
||||
var co *Conn
|
||||
co, err = DialTimeout("udp", a, dnsTimeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer co.Close()
|
||||
|
||||
opt := m.IsEdns0()
|
||||
// If EDNS0 is used use that for size.
|
||||
if opt != nil && opt.UDPSize() >= MinMsgSize {
|
||||
co.UDPSize = opt.UDPSize()
|
||||
}
|
||||
|
||||
co.SetWriteDeadline(time.Now().Add(dnsTimeout))
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
co.SetReadDeadline(time.Now().Add(dnsTimeout))
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.Exchange(m, a)
|
||||
return r, err
|
||||
}
|
||||
|
||||
// ExchangeConn performs a synchronous query. It sends the message m via the connection
|
||||
// c and waits for a reply. The connection c is not closed by ExchangeConn.
|
||||
// This function is going away, but can easily be mimicked:
|
||||
//
|
||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
||||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: this function is deprecated")
|
||||
co := new(Conn)
|
||||
co.Conn = c
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||
//
|
||||
// c := new(dns.Client)
|
||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||
//
|
||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||
// case of truncation.
|
||||
// It is up to the caller to create a message that allows for larger responses to be
|
||||
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||
// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit
|
||||
// of 512 bytes.
|
||||
func (c *Client) Exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
return c.exchange(m, a)
|
||||
}
|
||||
// This adds a bunch of garbage, TODO(miek).
|
||||
t := "nop"
|
||||
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
|
||||
t = t1
|
||||
}
|
||||
cl := "nop"
|
||||
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
|
||||
cl = cl1
|
||||
}
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
return c.exchange(m, a)
|
||||
})
|
||||
if err != nil {
|
||||
return r, rtt, err
|
||||
}
|
||||
if shared {
|
||||
return r.Copy(), rtt, nil
|
||||
}
|
||||
return r, rtt, nil
|
||||
}
|
||||
|
||||
func (c *Client) dialTimeout() time.Duration {
|
||||
if c.Timeout != 0 {
|
||||
return c.Timeout
|
||||
@@ -154,37 +78,88 @@ func (c *Client) writeTimeout() time.Duration {
|
||||
return dnsTimeout
|
||||
}
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var co *Conn
|
||||
// Dial connects to the address on the named network.
|
||||
func (c *Client) Dial(address string) (conn *Conn, err error) {
|
||||
// create a new dialer with the appropriate timeout
|
||||
var d net.Dialer
|
||||
if c.Dialer == nil {
|
||||
d = net.Dialer{}
|
||||
} else {
|
||||
d = net.Dialer(*c.Dialer)
|
||||
}
|
||||
d.Timeout = c.getTimeoutForRequest(c.writeTimeout())
|
||||
|
||||
network := "udp"
|
||||
tls := false
|
||||
useTLS := false
|
||||
|
||||
switch c.Net {
|
||||
case "tcp-tls":
|
||||
network = "tcp"
|
||||
tls = true
|
||||
useTLS = true
|
||||
case "tcp4-tls":
|
||||
network = "tcp4"
|
||||
tls = true
|
||||
useTLS = true
|
||||
case "tcp6-tls":
|
||||
network = "tcp6"
|
||||
tls = true
|
||||
useTLS = true
|
||||
default:
|
||||
if c.Net != "" {
|
||||
network = c.Net
|
||||
}
|
||||
}
|
||||
|
||||
var deadline time.Time
|
||||
if c.Timeout != 0 {
|
||||
deadline = time.Now().Add(c.Timeout)
|
||||
conn = new(Conn)
|
||||
if useTLS {
|
||||
conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig)
|
||||
} else {
|
||||
conn.Conn, err = d.Dial(network, address)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Exchange performs a synchronous query. It sends the message m to the address
|
||||
// contained in a and waits for a reply. Basic use pattern with a *dns.Client:
|
||||
//
|
||||
// c := new(dns.Client)
|
||||
// in, rtt, err := c.Exchange(message, "127.0.0.1:53")
|
||||
//
|
||||
// Exchange does not retry a failed query, nor will it fall back to TCP in
|
||||
// case of truncation.
|
||||
// It is up to the caller to create a message that allows for larger responses to be
|
||||
// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger
|
||||
// buffer, see SetEdns0. Messages without an OPT RR will fallback to the historic limit
|
||||
// of 512 bytes
|
||||
// To specify a local address or a timeout, the caller has to set the `Client.Dialer`
|
||||
// attribute appropriately
|
||||
func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) {
|
||||
if !c.SingleInflight {
|
||||
return c.exchange(m, address)
|
||||
}
|
||||
|
||||
if tls {
|
||||
co, err = DialTimeoutWithTLS(network, a, c.TLSConfig, c.dialTimeout())
|
||||
} else {
|
||||
co, err = DialTimeout(network, a, c.dialTimeout())
|
||||
t := "nop"
|
||||
if t1, ok := TypeToString[m.Question[0].Qtype]; ok {
|
||||
t = t1
|
||||
}
|
||||
cl := "nop"
|
||||
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
|
||||
cl = cl1
|
||||
}
|
||||
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
|
||||
return c.exchange(m, address)
|
||||
})
|
||||
if r != nil && shared {
|
||||
r = r.Copy()
|
||||
}
|
||||
return r, rtt, err
|
||||
}
|
||||
|
||||
func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var co *Conn
|
||||
|
||||
co, err = c.Dial(a)
|
||||
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
@@ -202,12 +177,13 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||
}
|
||||
|
||||
co.TsigSecret = c.TsigSecret
|
||||
co.SetWriteDeadline(deadlineOrTimeout(deadline, c.writeTimeout()))
|
||||
// write with the appropriate write timeout
|
||||
co.SetWriteDeadline(time.Now().Add(c.getTimeoutForRequest(c.writeTimeout())))
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
co.SetReadDeadline(deadlineOrTimeout(deadline, c.readTimeout()))
|
||||
co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout())))
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
@@ -216,8 +192,10 @@ func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err erro
|
||||
}
|
||||
|
||||
// ReadMsg reads a message from the connection co.
|
||||
// If the received message contains a TSIG record the transaction
|
||||
// signature is verified.
|
||||
// If the received message contains a TSIG record the transaction signature
|
||||
// is verified. This method always tries to return the message, however if an
|
||||
// error is returned there are no guarantees that the returned message is a
|
||||
// valid representation of the packet read.
|
||||
func (co *Conn) ReadMsg() (*Msg, error) {
|
||||
p, err := co.ReadMsgHeader(nil)
|
||||
if err != nil {
|
||||
@@ -226,13 +204,10 @@ func (co *Conn) ReadMsg() (*Msg, error) {
|
||||
|
||||
m := new(Msg)
|
||||
if err := m.Unpack(p); err != nil {
|
||||
// If ErrTruncated was returned, we still want to allow the user to use
|
||||
// If an error was returned, we still want to allow the user to use
|
||||
// the message, but naively they can just check err if they don't want
|
||||
// to use a truncated message
|
||||
if err == ErrTruncated {
|
||||
return m, err
|
||||
}
|
||||
return nil, err
|
||||
// to use an erroneous message
|
||||
return m, err
|
||||
}
|
||||
if t := m.IsTsig(); t != nil {
|
||||
if _, ok := co.TsigSecret[t.Hdr.Name]; !ok {
|
||||
@@ -300,6 +275,18 @@ func tcpMsgLen(t io.Reader) (int, error) {
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// As seen with my local router/switch, returns 1 byte on the above read,
|
||||
// resulting in a ShortRead. Just write it out (instead of loop) and read the
|
||||
// other byte.
|
||||
if n == 1 {
|
||||
n1, err := t.Read(p[1:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n += n1
|
||||
}
|
||||
|
||||
if n != 2 {
|
||||
return 0, ErrShortRead
|
||||
}
|
||||
@@ -400,10 +387,28 @@ func (co *Conn) Write(p []byte) (n int, err error) {
|
||||
n, err := io.Copy(w, bytes.NewReader(p))
|
||||
return int(n), err
|
||||
}
|
||||
n, err = co.Conn.(*net.UDPConn).Write(p)
|
||||
n, err = co.Conn.Write(p)
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Return the appropriate timeout for a specific request
|
||||
func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
|
||||
var requestTimeout time.Duration
|
||||
if c.Timeout != 0 {
|
||||
requestTimeout = c.Timeout
|
||||
} else {
|
||||
requestTimeout = timeout
|
||||
}
|
||||
// net.Dialer.Timeout has priority if smaller than the timeouts computed so
|
||||
// far
|
||||
if c.Dialer != nil && c.Dialer.Timeout != 0 {
|
||||
if c.Dialer.Timeout < requestTimeout {
|
||||
requestTimeout = c.Dialer.Timeout
|
||||
}
|
||||
}
|
||||
return requestTimeout
|
||||
}
|
||||
|
||||
// Dial connects to the address on the named network.
|
||||
func Dial(network, address string) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
@@ -414,10 +419,43 @@ func Dial(network, address string) (conn *Conn, err error) {
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// ExchangeContext performs a synchronous UDP query, like Exchange. It
|
||||
// additionally obeys deadlines from the passed Context.
|
||||
func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
|
||||
client := Client{Net: "udp"}
|
||||
r, _, err = client.ExchangeContext(ctx, m, a)
|
||||
// ignoring rtt to leave the original ExchangeContext API unchanged, but
|
||||
// this function will go away
|
||||
return r, err
|
||||
}
|
||||
|
||||
// ExchangeConn performs a synchronous query. It sends the message m via the connection
|
||||
// c and waits for a reply. The connection c is not closed by ExchangeConn.
|
||||
// This function is going away, but can easily be mimicked:
|
||||
//
|
||||
// co := &dns.Conn{Conn: c} // c is your net.Conn
|
||||
// co.WriteMsg(m)
|
||||
// in, _ := co.ReadMsg()
|
||||
// co.Close()
|
||||
//
|
||||
func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
|
||||
println("dns: ExchangeConn: this function is deprecated")
|
||||
co := new(Conn)
|
||||
co.Conn = c
|
||||
if err = co.WriteMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err = co.ReadMsg()
|
||||
if err == nil && r.Id != m.Id {
|
||||
err = ErrId
|
||||
}
|
||||
return r, err
|
||||
}
|
||||
|
||||
// DialTimeout acts like Dial but takes a timeout.
|
||||
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = net.DialTimeout(network, address, timeout)
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -426,8 +464,12 @@ func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, er
|
||||
|
||||
// DialWithTLS connects to the address on the named network with TLS.
|
||||
func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = tls.Dial(network, address, tlsConfig)
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -436,20 +478,29 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er
|
||||
|
||||
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
|
||||
func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
|
||||
var dialer net.Dialer
|
||||
dialer.Timeout = timeout
|
||||
|
||||
conn = new(Conn)
|
||||
conn.Conn, err = tls.DialWithDialer(&dialer, network, address, tlsConfig)
|
||||
if !strings.HasSuffix(network, "-tls") {
|
||||
network += "-tls"
|
||||
}
|
||||
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
|
||||
conn, err = client.Dial(address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func deadlineOrTimeout(deadline time.Time, timeout time.Duration) time.Time {
|
||||
if deadline.IsZero() {
|
||||
return time.Now().Add(timeout)
|
||||
// ExchangeContext acts like Exchange, but honors the deadline on the provided
|
||||
// context, if present. If there is both a context deadline and a configured
|
||||
// timeout on the client, the earliest of the two takes effect.
|
||||
func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) {
|
||||
var timeout time.Duration
|
||||
if deadline, ok := ctx.Deadline(); !ok {
|
||||
timeout = 0
|
||||
} else {
|
||||
timeout = deadline.Sub(time.Now())
|
||||
}
|
||||
return deadline
|
||||
// not passing the context to the underlying calls, as the API does not support
|
||||
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
|
||||
c.Dialer = &net.Dialer{Timeout: timeout}
|
||||
return c.Exchange(m, a)
|
||||
}
|
||||
|
||||
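client.go replaces the old per-call dial helpers with a Dialer field and a getTimeoutForRequest helper. A sketch of a query using the new field; the resolver address is a placeholder:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"time"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.com"), dns.TypeA)

	c := &dns.Client{
		Net: "udp",
		// The Dialer carries local address and timeout; a non-zero
		// Client.Timeout would still cap dial+write+read as a whole.
		Dialer: &net.Dialer{Timeout: 2 * time.Second},
	}
	r, rtt, err := c.Exchange(m, "127.0.0.1:53")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rtt, r.Answer)
}
```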
48
vendor/github.com/miekg/dns/clientconfig.go
generated
vendored
@@ -2,6 +2,7 @@ package dns

import (
	"bufio"
	"io"
	"os"
	"strconv"
	"strings"
@@ -25,8 +26,13 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
		return nil, err
	}
	defer file.Close()
	return ClientConfigFromReader(file)
}

// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument
func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
	c := new(ClientConfig)
	scanner := bufio.NewScanner(file)
	scanner := bufio.NewScanner(resolvconf)
	c.Servers = make([]string, 0)
	c.Search = make([]string, 0)
	c.Port = "53"
@@ -73,8 +79,10 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
			switch {
			case len(s) >= 6 && s[:6] == "ndots:":
				n, _ := strconv.Atoi(s[6:])
				if n < 1 {
					n = 1
				if n < 0 {
					n = 0
				} else if n > 15 {
					n = 15
				}
				c.Ndots = n
			case len(s) >= 8 && s[:8] == "timeout:":
@@ -83,7 +91,7 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
					n = 1
				}
				c.Timeout = n
			case len(s) >= 8 && s[:9] == "attempts:":
			case len(s) >= 9 && s[:9] == "attempts:":
				n, _ := strconv.Atoi(s[9:])
				if n < 1 {
					n = 1
@@ -97,3 +105,35 @@ func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) {
	}
	return c, nil
}

// NameList returns all of the names that should be queried based on the
// config. It is based off of go's net/dns name building, but it does not
// check the length of the resulting names.
func (c *ClientConfig) NameList(name string) []string {
	// if this domain is already fully qualified, no append needed.
	if IsFqdn(name) {
		return []string{name}
	}

	// Check to see if the name has more labels than Ndots. Do this before making
	// the domain fully qualified.
	hasNdots := CountLabel(name) > c.Ndots
	// Make the domain fully qualified.
	name = Fqdn(name)

	// Make a list of names based off search.
	names := []string{}

	// If name has enough dots, try that first.
	if hasNdots {
		names = append(names, name)
	}
	for _, s := range c.Search {
		names = append(names, Fqdn(name+s))
	}
	// If we didn't have enough dots, try after suffixes.
	if !hasNdots {
		names = append(names, name)
	}
	return names
}
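ClientConfigFromReader and NameList make the resolv.conf handling usable without a file on disk. A small sketch under an invented resolv.conf snippet; the search domains and ndots value are illustrative only:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	conf, err := dns.ClientConfigFromReader(strings.NewReader(
		"nameserver 127.0.0.1\nsearch example.com sub.example.com\noptions ndots:2\n"))
	if err != nil {
		panic(err)
	}

	// "host" has fewer labels than ndots, so the search suffixes are tried first.
	fmt.Println(conf.NameList("host"))
	// "a.b.c" already has more labels than ndots, so the literal name comes first.
	fmt.Println(conf.NameList("a.b.c"))
}
```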
188  vendor/github.com/miekg/dns/compress_generate.go  generated  vendored  Normal file
@@ -0,0 +1,188 @@
|
||||
//+build ignore
|
||||
|
||||
// compression_generate.go is meant to run with go generate. It will use
|
||||
// go/{importer,types} to track down all the RR struct types. Then for each type
|
||||
// it will look to see if there are (compressible) names, if so it will add that
|
||||
// type to compressionLenHelperType and comressionLenSearchType which "fake" the
|
||||
// compression so that Len() is fast.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/importer"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// Code generated by "go run compress_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
`
|
||||
|
||||
// getTypeStruct will take a type and the package scope, and return the
|
||||
// (innermost) struct if the type is considered a RR type (currently defined as
|
||||
// those structs beginning with a RR_Header, could be redefined as implementing
|
||||
// the RR interface). The bool return value indicates if embedded structs were
|
||||
// resolved.
|
||||
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
|
||||
st, ok := t.Underlying().(*types.Struct)
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
|
||||
return st, false
|
||||
}
|
||||
if st.Field(0).Anonymous() {
|
||||
st, _ := getTypeStruct(st.Field(0).Type(), scope)
|
||||
return st, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Import and type-check the package
|
||||
pkg, err := importer.Default().Import("github.com/miekg/dns")
|
||||
fatalIfErr(err)
|
||||
scope := pkg.Scope()
|
||||
|
||||
var domainTypes []string // Types that have a domain name in them (either compressible or not).
|
||||
var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType)
|
||||
Names:
|
||||
for _, name := range scope.Names() {
|
||||
o := scope.Lookup(name)
|
||||
if o == nil || !o.Exported() {
|
||||
continue
|
||||
}
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
if st == nil {
|
||||
continue
|
||||
}
|
||||
if name == "PrivateRR" {
|
||||
continue
|
||||
}
|
||||
|
||||
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
|
||||
log.Fatalf("Constant Type%s does not exist.", o.Name())
|
||||
}
|
||||
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
if st.Tag(i) == `dns:"domain-name"` {
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
if st.Tag(i) == `dns:"cdomain-name"` {
|
||||
cdomainTypes = append(cdomainTypes, o.Name())
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
cdomainTypes = append(cdomainTypes, o.Name())
|
||||
domainTypes = append(domainTypes, o.Name())
|
||||
continue Names
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
b.WriteString(packageHdr)
|
||||
|
||||
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
|
||||
|
||||
fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for _, name := range domainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) }
|
||||
|
||||
if _, ok := st.Field(i).Type().(*types.Slice); ok {
|
||||
switch st.Tag(i) {
|
||||
case `dns:"domain-name"`:
|
||||
fallthrough
|
||||
case `dns:"cdomain-name"`:
|
||||
// For HIP we need to slice over the elements in this slice.
|
||||
fmt.Fprintf(b, `for i := range x.%s {
|
||||
compressionLenHelper(c, x.%s[i])
|
||||
}
|
||||
`, st.Field(i).Name(), st.Field(i).Name())
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
fallthrough
|
||||
case st.Tag(i) == `dns:"domain-name"`:
|
||||
out(st.Field(i).Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(b, "}\n}\n\n")
|
||||
|
||||
// compressionLenSearchType - search cdomain-tags types for compressible names.
|
||||
|
||||
fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n")
|
||||
fmt.Fprint(b, "switch x := r.(type) {\n")
|
||||
for _, name := range cdomainTypes {
|
||||
o := scope.Lookup(name)
|
||||
st, _ := getTypeStruct(o.Type(), scope)
|
||||
|
||||
fmt.Fprintf(b, "case *%s:\n", name)
|
||||
j := 1
|
||||
for i := 1; i < st.NumFields(); i++ {
|
||||
out := func(s string, j int) {
|
||||
fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name())
|
||||
}
|
||||
|
||||
// There are no slice types with names that can be compressed.
|
||||
|
||||
switch {
|
||||
case st.Tag(i) == `dns:"cdomain-name"`:
|
||||
out(st.Field(i).Name(), j)
|
||||
j++
|
||||
}
|
||||
}
|
||||
k := "k1"
|
||||
ok := "ok1"
|
||||
for i := 2; i < j; i++ {
|
||||
k += fmt.Sprintf(" + k%d", i)
|
||||
ok += fmt.Sprintf(" && ok%d", i)
|
||||
}
|
||||
fmt.Fprintf(b, "return %s, %s\n", k, ok)
|
||||
}
|
||||
fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n")
|
||||
|
||||
// gofmt
|
||||
res, err := format.Source(b.Bytes())
|
||||
if err != nil {
|
||||
b.WriteTo(os.Stderr)
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
f, err := os.Create("zcompress.go")
|
||||
fatalIfErr(err)
|
||||
defer f.Close()
|
||||
f.Write(res)
|
||||
}
|
||||
|
||||
func fatalIfErr(err error) {
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
14  vendor/github.com/miekg/dns/defaults.go  generated  vendored
@@ -13,9 +13,12 @@ const hexDigit = "0123456789abcdef"
// SetReply creates a reply message from a request message.
func (dns *Msg) SetReply(request *Msg) *Msg {
	dns.Id = request.Id
	dns.RecursionDesired = request.RecursionDesired // Copy rd bit
	dns.Response = true
	dns.Opcode = OpcodeQuery
	dns.Opcode = request.Opcode
	if dns.Opcode == OpcodeQuery {
		dns.RecursionDesired = request.RecursionDesired // Copy rd bit
		dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit
	}
	dns.Rcode = RcodeSuccess
	if len(request.Question) > 0 {
		dns.Question = make([]Question, 1)
@@ -270,8 +273,11 @@ func (t Type) String() string {

// String returns the string representation for the class c.
func (c Class) String() string {
	if c1, ok := ClassToString[uint16(c)]; ok {
		return c1
	if s, ok := ClassToString[uint16(c)]; ok {
		// Only emit mnemonics when they are unambiguous, specically ANY is in both.
		if _, ok := StringToType[s]; !ok {
			return s
		}
	}
	return "CLASS" + strconv.Itoa(int(c))
}
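The SetReply change mirrors the request opcode and only copies the RD/CD bits for plain queries. A quick sketch of the observable difference, using NOTIFY as an arbitrary non-query opcode:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	req := new(dns.Msg)
	req.SetQuestion("example.org.", dns.TypeSOA)
	req.Opcode = dns.OpcodeNotify

	resp := new(dns.Msg)
	resp.SetReply(req)

	// The reply now mirrors the request opcode instead of being forced to
	// OpcodeQuery, and RD/CD are only copied for real queries.
	fmt.Println(resp.Opcode == dns.OpcodeNotify) // true
	fmt.Println(resp.Response)                   // true
}
```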
9  vendor/github.com/miekg/dns/dns.go  generated  vendored
@@ -6,9 +6,12 @@ const (
	year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
	defaultTtl = 3600 // Default internal TTL.

	DefaultMsgSize = 4096 // DefaultMsgSize is the standard default for messages larger than 512 bytes.
	MinMsgSize = 512 // MinMsgSize is the minimal size of a DNS packet.
	MaxMsgSize = 65535 // MaxMsgSize is the largest possible DNS packet.
	// DefaultMsgSize is the standard default for messages larger than 512 bytes.
	DefaultMsgSize = 4096
	// MinMsgSize is the minimal size of a DNS packet.
	MinMsgSize = 512
	// MaxMsgSize is the largest possible DNS packet.
	MaxMsgSize = 65535
)

// Error represents a DNS error.
92  vendor/github.com/miekg/dns/dnssec.go  generated  vendored
@@ -19,6 +19,8 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// DNSSEC encryption algorithm codes.
|
||||
@@ -38,12 +40,14 @@ const (
|
||||
ECCGOST
|
||||
ECDSAP256SHA256
|
||||
ECDSAP384SHA384
|
||||
ED25519
|
||||
ED448
|
||||
INDIRECT uint8 = 252
|
||||
PRIVATEDNS uint8 = 253 // Private (experimental keys)
|
||||
PRIVATEOID uint8 = 254
|
||||
)
|
||||
|
||||
// Map for algorithm names.
|
||||
// AlgorithmToString is a map of algorithm IDs to algorithm names.
|
||||
var AlgorithmToString = map[uint8]string{
|
||||
RSAMD5: "RSAMD5",
|
||||
DH: "DH",
|
||||
@@ -56,15 +60,17 @@ var AlgorithmToString = map[uint8]string{
|
||||
ECCGOST: "ECC-GOST",
|
||||
ECDSAP256SHA256: "ECDSAP256SHA256",
|
||||
ECDSAP384SHA384: "ECDSAP384SHA384",
|
||||
ED25519: "ED25519",
|
||||
ED448: "ED448",
|
||||
INDIRECT: "INDIRECT",
|
||||
PRIVATEDNS: "PRIVATEDNS",
|
||||
PRIVATEOID: "PRIVATEOID",
|
||||
}
|
||||
|
||||
// Map of algorithm strings.
|
||||
// StringToAlgorithm is the reverse of AlgorithmToString.
|
||||
var StringToAlgorithm = reverseInt8(AlgorithmToString)
|
||||
|
||||
// Map of algorithm crypto hashes.
|
||||
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
|
||||
var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
RSAMD5: crypto.MD5, // Deprecated in RFC 6725
|
||||
RSASHA1: crypto.SHA1,
|
||||
@@ -73,6 +79,7 @@ var AlgorithmToHash = map[uint8]crypto.Hash{
|
||||
ECDSAP256SHA256: crypto.SHA256,
|
||||
ECDSAP384SHA384: crypto.SHA384,
|
||||
RSASHA512: crypto.SHA512,
|
||||
ED25519: crypto.Hash(0),
|
||||
}
|
||||
|
||||
// DNSSEC hashing algorithm codes.
|
||||
@@ -85,7 +92,7 @@ const (
|
||||
SHA512 // Experimental
|
||||
)
|
||||
|
||||
// Map for hash names.
|
||||
// HashToString is a map of hash IDs to names.
|
||||
var HashToString = map[uint8]string{
|
||||
SHA1: "SHA1",
|
||||
SHA256: "SHA256",
|
||||
@@ -94,7 +101,7 @@ var HashToString = map[uint8]string{
|
||||
SHA512: "SHA512",
|
||||
}
|
||||
|
||||
// Map of hash strings.
|
||||
// StringToHash is a map of names to hash IDs.
|
||||
var StringToHash = reverseInt8(HashToString)
|
||||
|
||||
// DNSKEY flag values.
|
||||
@@ -301,17 +308,33 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
|
||||
return ErrAlg
|
||||
}
|
||||
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
switch rr.Algorithm {
|
||||
case ED25519:
|
||||
// ed25519 signs the raw message and performs hashing internally.
|
||||
// All other supported signature schemes operate over the pre-hashed
|
||||
// message, and thus ed25519 must be handled separately here.
|
||||
//
|
||||
// The raw message is passed directly into sign and crypto.Hash(0) is
|
||||
// used to signal to the crypto.Signer that the data has not been hashed.
|
||||
signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
rr.Signature = toBase64(signature)
|
||||
default:
|
||||
h := hash.New()
|
||||
h.Write(signdata)
|
||||
h.Write(wire)
|
||||
|
||||
signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
}
|
||||
|
||||
rr.Signature = toBase64(signature)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -352,6 +375,9 @@ func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte,
|
||||
// signature = append(signature, intToBytes(r1, 20)...)
|
||||
// signature = append(signature, intToBytes(s1, 20)...)
|
||||
// rr.Signature = signature
|
||||
|
||||
case ED25519:
|
||||
return signature, nil
|
||||
}
|
||||
|
||||
return nil, ErrAlg
|
||||
@@ -456,6 +482,17 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
case ED25519:
|
||||
pubkey := k.publicKeyED25519()
|
||||
if pubkey == nil {
|
||||
return ErrKey
|
||||
}
|
||||
|
||||
if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
|
||||
return nil
|
||||
}
|
||||
return ErrSig
|
||||
|
||||
default:
|
||||
return ErrAlg
|
||||
}
|
||||
@@ -515,7 +552,7 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
|
||||
}
|
||||
// Remainder
|
||||
expo += uint64(keybuf[keyoff])
|
||||
if expo > 2<<31 {
|
||||
if expo > (2<<31)+1 {
|
||||
// Larger expo than supported.
|
||||
// println("dns: F5 primes (or larger) are not supported")
|
||||
return nil
|
||||
@@ -578,6 +615,17 @@ func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
|
||||
return pubkey
|
||||
}
|
||||
|
||||
func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
|
||||
keybuf, err := fromBase64([]byte(k.PublicKey))
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if len(keybuf) != ed25519.PublicKeySize {
|
||||
return nil
|
||||
}
|
||||
return keybuf
|
||||
}
|
||||
|
||||
type wireSlice [][]byte
|
||||
|
||||
func (p wireSlice) Len() int { return len(p) }
|
||||
@@ -615,6 +663,10 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||
switch x := r1.(type) {
|
||||
case *NS:
|
||||
x.Ns = strings.ToLower(x.Ns)
|
||||
case *MD:
|
||||
x.Md = strings.ToLower(x.Md)
|
||||
case *MF:
|
||||
x.Mf = strings.ToLower(x.Mf)
|
||||
case *CNAME:
|
||||
x.Target = strings.ToLower(x.Target)
|
||||
case *SOA:
|
||||
@@ -633,6 +685,18 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
|
||||
x.Email = strings.ToLower(x.Email)
|
||||
case *MX:
|
||||
x.Mx = strings.ToLower(x.Mx)
|
||||
case *RP:
|
||||
x.Mbox = strings.ToLower(x.Mbox)
|
||||
x.Txt = strings.ToLower(x.Txt)
|
||||
case *AFSDB:
|
||||
x.Hostname = strings.ToLower(x.Hostname)
|
||||
case *RT:
|
||||
x.Host = strings.ToLower(x.Host)
|
||||
case *SIG:
|
||||
x.SignerName = strings.ToLower(x.SignerName)
|
||||
case *PX:
|
||||
x.Map822 = strings.ToLower(x.Map822)
|
||||
x.Mapx400 = strings.ToLower(x.Mapx400)
|
||||
case *NAPTR:
|
||||
x.Replacement = strings.ToLower(x.Replacement)
|
||||
case *KX:
|
||||
|
||||
22  vendor/github.com/miekg/dns/dnssec_keygen.go  generated  vendored
@@ -8,6 +8,8 @@ import (
	"crypto/rand"
	"crypto/rsa"
	"math/big"

	"golang.org/x/crypto/ed25519"
)

// Generate generates a DNSKEY of the given bit size.
@@ -38,6 +40,10 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
		if bits != 384 {
			return nil, ErrKeySize
		}
	case ED25519:
		if bits != 256 {
			return nil, ErrKeySize
		}
	}

	switch k.Algorithm {
@@ -75,6 +81,13 @@ func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) {
		}
		k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y)
		return priv, nil
	case ED25519:
		pub, priv, err := ed25519.GenerateKey(rand.Reader)
		if err != nil {
			return nil, err
		}
		k.setPublicKeyED25519(pub)
		return priv, nil
	default:
		return nil, ErrAlg
	}
@@ -117,6 +130,15 @@ func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool {
	return true
}

// Set the public key for Ed25519
func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool {
	if _K == nil {
		return false
	}
	k.PublicKey = toBase64(_K)
	return true
}

// Set the public key (the values E and N) for RSA
// RFC 3110: Section 2. RSA Public KEY Resource Records
func exponentToBuf(_E int) []byte {
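With ED25519 handled by Generate here and by Sign/Verify in dnssec.go above, a key can be exercised end-to-end. A sketch with placeholder names; it assumes, as the dnssec.go hunk indicates, that the ed25519 private key is passed to RRSIG.Sign as a crypto.Signer and that no pre-hashing is applied:

```go
package main

import (
	"crypto"
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256,
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	priv, err := key.Generate(256) // Ed25519 keys are always 256 bits
	if err != nil {
		panic(err)
	}

	a, _ := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")

	sig := &dns.RRSIG{
		KeyTag:     key.KeyTag(),
		SignerName: key.Hdr.Name,
		Algorithm:  dns.ED25519,
		Inception:  uint32(time.Now().Unix()),
		Expiration: uint32(time.Now().Add(24 * time.Hour).Unix()),
	}
	// For ED25519, Sign hands the raw wire data to the signer; hashing is
	// done inside the signature scheme itself (crypto.Hash(0)).
	if err := sig.Sign(priv.(crypto.Signer), []dns.RR{a}); err != nil {
		panic(err)
	}
	fmt.Println(sig.Verify(key, []dns.RR{a})) // <nil> means the signature checks out
}
```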
52  vendor/github.com/miekg/dns/dnssec_keyscan.go  generated  vendored
@@ -1,6 +1,7 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
@@ -9,6 +10,8 @@ import (
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ed25519"
|
||||
)
|
||||
|
||||
// NewPrivateKey returns a PrivateKey by parsing the string s.
|
||||
@@ -36,7 +39,7 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// TODO(mg): check if the pubkey matches the private key
|
||||
algo, err := strconv.Atoi(strings.SplitN(m["algorithm"], " ", 2)[0])
|
||||
algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8)
|
||||
if err != nil {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
@@ -86,6 +89,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er
|
||||
}
|
||||
priv.PublicKey = *pub
|
||||
return priv, nil
|
||||
case ED25519:
|
||||
return readPrivateKeyED25519(m)
|
||||
default:
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
@@ -166,13 +171,56 @@ func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) {
|
||||
var p ed25519.PrivateKey
|
||||
// TODO: validate that the required flags are present
|
||||
for k, v := range m {
|
||||
switch k {
|
||||
case "privatekey":
|
||||
p1, err := fromBase64([]byte(v))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(p1) != 32 {
|
||||
return nil, ErrPrivKey
|
||||
}
|
||||
// RFC 8080 and Golang's x/crypto/ed25519 differ as to how the
|
||||
// private keys are represented. RFC 8080 specifies that private
|
||||
// keys be stored solely as the seed value (p1 above) while the
|
||||
// ed25519 package represents them as the seed value concatenated
|
||||
// to the public key, which is derived from the seed value.
|
||||
//
|
||||
// ed25519.GenerateKey reads exactly 32 bytes from the passed in
|
||||
// io.Reader and uses them as the seed. It also derives the
|
||||
// public key and produces a compatible private key.
|
||||
_, p, err = ed25519.GenerateKey(bytes.NewReader(p1))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case "created", "publish", "activate":
|
||||
/* not used in Go (yet) */
|
||||
}
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseKey reads a private key from r. It returns a map[string]string,
|
||||
// with the key-value pairs, or an error when the file is not correct.
|
||||
func parseKey(r io.Reader, file string) (map[string]string, error) {
|
||||
s := scanInit(r)
|
||||
s, cancel := scanInit(r)
|
||||
m := make(map[string]string)
|
||||
c := make(chan lex)
|
||||
k := ""
|
||||
defer func() {
|
||||
cancel()
|
||||
// zlexer can send up to two tokens, the next one and possibly 1 remainders.
|
||||
// Do a non-blocking read.
|
||||
_, ok := <-c
|
||||
_, ok = <-c
|
||||
if !ok {
|
||||
// too bad
|
||||
}
|
||||
}()
|
||||
// Start the lexer
|
||||
go klexer(s, c)
|
||||
for l := range c {
|
||||
|
||||
8  vendor/github.com/miekg/dns/dnssec_privkey.go  generated  vendored
@@ -7,6 +7,8 @@ import (
	"crypto/rsa"
	"math/big"
	"strconv"

	"golang.org/x/crypto/ed25519"
)

const format = "Private-key-format: v1.3\n"
@@ -79,6 +81,12 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
			"Private_value(x): " + priv + "\n" +
			"Public_value(y): " + pub + "\n"

	case ed25519.PrivateKey:
		private := toBase64(p[:32])
		return format +
			"Algorithm: " + algorithm + "\n" +
			"PrivateKey: " + private + "\n"

	default:
		return ""
	}
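PrivateKeyString gains an ed25519.PrivateKey case (storing only the 32-byte seed, per RFC 8080), and dnssec_keyscan.go above can read that format back. A rough round-trip sketch; the zone and file names are placeholders:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	key := &dns.DNSKEY{
		Hdr:       dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeDNSKEY, Class: dns.ClassINET, Ttl: 3600},
		Flags:     256,
		Protocol:  3,
		Algorithm: dns.ED25519,
	}
	priv, err := key.Generate(256)
	if err != nil {
		panic(err)
	}

	// BIND-style private key text; for Ed25519 it contains only the seed.
	text := key.PrivateKeyString(priv)
	fmt.Print(text)

	// Read it back; the file name argument is only used for error reporting.
	restored, err := key.ReadPrivateKey(strings.NewReader(text), "Kexample.org.+015+12345.private")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", restored) // ed25519.PrivateKey
}
```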
35  vendor/github.com/miekg/dns/doc.go  generated  vendored
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
Package dns implements a full featured interface to the Domain Name System.
|
||||
Server- and client-side programming is supported.
|
||||
The package allows complete control over what is send out to the DNS. The package
|
||||
The package allows complete control over what is sent out to the DNS. The package
|
||||
API follows the less-is-more principle, by presenting a small, clean interface.
|
||||
|
||||
The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
|
||||
@@ -14,7 +14,7 @@ Basic usage pattern for creating a new resource record:
|
||||
|
||||
r := new(dns.MX)
|
||||
r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
|
||||
Class: dns.ClassINET, Ttl: 3600}
|
||||
Class: dns.ClassINET, Ttl: 3600}
|
||||
r.Preference = 10
|
||||
r.Mx = "mx.miek.nl."
|
||||
|
||||
@@ -22,16 +22,16 @@ Or directly from a string:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
|
||||
|
||||
Or when the default TTL (3600) and class (IN) suit you:
|
||||
Or when the default origin (.) and TTL (3600) and class (IN) suit you:
|
||||
|
||||
mx, err := dns.NewRR("miek.nl. MX 10 mx.miek.nl.")
|
||||
mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
|
||||
|
||||
Or even:
|
||||
|
||||
mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
|
||||
|
||||
In the DNS messages are exchanged, these messages contain resource
|
||||
records (sets). Use pattern for creating a message:
|
||||
records (sets). Use pattern for creating a message:
|
||||
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("miek.nl.", dns.TypeMX)
|
||||
@@ -51,7 +51,7 @@ The following is slightly more verbose, but more flexible:
|
||||
m1.Question = make([]dns.Question, 1)
|
||||
m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
|
||||
|
||||
After creating a message it can be send.
|
||||
After creating a message it can be sent.
|
||||
Basic use pattern for synchronous querying the DNS at a
|
||||
server configured on 127.0.0.1 and port 53:
|
||||
|
||||
@@ -63,7 +63,23 @@ class) is as easy as setting:
|
||||
|
||||
c.SingleInflight = true
|
||||
|
||||
If these "advanced" features are not needed, a simple UDP query can be send,
|
||||
More advanced options are available using a net.Dialer and the corresponding API.
|
||||
For example it is possible to set a timeout, or to specify a source IP address
|
||||
and port to use for the connection:
|
||||
|
||||
c := new(dns.Client)
|
||||
laddr := net.UDPAddr{
|
||||
IP: net.ParseIP("[::1]"),
|
||||
Port: 12345,
|
||||
Zone: "",
|
||||
}
|
||||
d := net.Dialer{
|
||||
Timeout: 200 * time.Millisecond,
|
||||
LocalAddr: &laddr,
|
||||
}
|
||||
in, rtt, err := c.ExchangeWithDialer(&d, m1, "8.8.8.8:53")
|
||||
|
||||
If these "advanced" features are not needed, a simple UDP query can be sent,
|
||||
with:
|
||||
|
||||
in, err := dns.Exchange(m1, "127.0.0.1:53")
|
||||
@@ -152,6 +168,11 @@ Basic use pattern when querying with a TSIG name "axfr." (note that these key na
|
||||
must be fully qualified - as they are domain names) and the base64 secret
|
||||
"so6ZGir4GPAqINNh9U5c3A==":
|
||||
|
||||
If an incoming message contains a TSIG record it MUST be the last record in
|
||||
the additional section (RFC2845 3.2). This means that you should make the
|
||||
call to SetTsig last, right before executing the query. If you make any
|
||||
changes to the RRset after calling SetTsig() the signature will be incorrect.
|
||||
|
||||
c := new(dns.Client)
|
||||
c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
|
||||
m := new(dns.Msg)
|
||||
|
||||
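The TSIG paragraph added to doc.go stops just as its example begins; a fuller sketch of the pattern it describes, reusing the axfr. key name and secret from the package documentation (the server address is a placeholder):

```go
package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}

	m := new(dns.Msg)
	m.SetQuestion("miek.nl.", dns.TypeSOA)
	// SetTsig must be the last change to the message before it is sent,
	// otherwise the computed signature no longer matches the RRset.
	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())

	in, _, err := c.Exchange(m, "127.0.0.1:53")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(in)
}
```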
98  vendor/github.com/miekg/dns/edns.go  generated  vendored
@@ -13,18 +13,18 @@ import (
|
||||
const (
|
||||
EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01
|
||||
EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt
|
||||
EDNS0NSID = 0x3 // nsid (RFC5001)
|
||||
EDNS0NSID = 0x3 // nsid (See RFC 5001)
|
||||
EDNS0DAU = 0x5 // DNSSEC Algorithm Understood
|
||||
EDNS0DHU = 0x6 // DS Hash Understood
|
||||
EDNS0N3U = 0x7 // NSEC3 Hash Understood
|
||||
EDNS0SUBNET = 0x8 // client-subnet (RFC6891)
|
||||
EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871)
|
||||
EDNS0EXPIRE = 0x9 // EDNS0 expire
|
||||
EDNS0COOKIE = 0xa // EDNS0 Cookie
|
||||
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (RFC7828)
|
||||
EDNS0SUBNETDRAFT = 0x50fa // Don't use! Use EDNS0SUBNET
|
||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (RFC6891)
|
||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (RFC6891)
|
||||
_DO = 1 << 15 // dnssec ok
|
||||
EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828)
|
||||
EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830)
|
||||
EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891)
|
||||
EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891)
|
||||
_DO = 1 << 15 // DNSSEC OK
|
||||
)
|
||||
|
||||
// OPT is the EDNS0 RR appended to messages to convey extra (meta) information.
|
||||
@@ -57,9 +57,6 @@ func (rr *OPT) String() string {
|
||||
}
|
||||
case *EDNS0_SUBNET:
|
||||
s += "\n; SUBNET: " + o.String()
|
||||
if o.(*EDNS0_SUBNET).DraftOption {
|
||||
s += " (draft)"
|
||||
}
|
||||
case *EDNS0_COOKIE:
|
||||
s += "\n; COOKIE: " + o.String()
|
||||
case *EDNS0_UL:
|
||||
@@ -74,6 +71,8 @@ func (rr *OPT) String() string {
|
||||
s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String()
|
||||
case *EDNS0_LOCAL:
|
||||
s += "\n; LOCAL OPT: " + o.String()
|
||||
case *EDNS0_PADDING:
|
||||
s += "\n; PADDING: " + o.String()
|
||||
}
|
||||
}
|
||||
return s
|
||||
@@ -103,15 +102,12 @@ func (rr *OPT) SetVersion(v uint8) {
|
||||
|
||||
// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL).
|
||||
func (rr *OPT) ExtendedRcode() int {
|
||||
return int((rr.Hdr.Ttl&0xFF000000)>>24) + 15
|
||||
return int((rr.Hdr.Ttl & 0xFF000000) >> 24)
|
||||
}
|
||||
|
||||
// SetExtendedRcode sets the EDNS extended RCODE field.
|
||||
func (rr *OPT) SetExtendedRcode(v uint8) {
|
||||
if v < RcodeBadVers { // Smaller than 16.. Use the 4 bits you have!
|
||||
return
|
||||
}
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v-15) << 24)
|
||||
rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24)
|
||||
}
|
||||
|
||||
// UDPSize returns the UDP buffer size.
|
||||
@@ -157,7 +153,7 @@ type EDNS0 interface {
|
||||
String() string
|
||||
}
|
||||
|
||||
// The nsid EDNS0 option is used to retrieve a nameserver
|
||||
// EDNS0_NSID option is used to retrieve a nameserver
|
||||
// identifier. When sending a request Nsid must be set to the empty string
|
||||
// The identifier is an opaque string encoded as hex.
|
||||
// Basic use pattern for creating an nsid option:
|
||||
@@ -182,12 +178,13 @@ func (e *EDNS0_NSID) pack() ([]byte, error) {
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID }
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
|
||||
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
|
||||
|
||||
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
|
||||
// an idea of where the client lives. It can then give back a different
|
||||
// an idea of where the client lives. See RFC 7871. It can then give back a different
|
||||
// answer depending on the location or network topology.
|
||||
// Basic use pattern for creating an subnet option:
|
||||
//
|
||||
@@ -197,31 +194,25 @@ func (e *EDNS0_NSID) String() string { return string(e.Nsid) }
|
||||
// e := new(dns.EDNS0_SUBNET)
|
||||
// e.Code = dns.EDNS0SUBNET
|
||||
// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6
|
||||
// e.SourceNetMask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6
|
||||
// e.SourceScope = 0
|
||||
// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4
|
||||
// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6
|
||||
// o.Option = append(o.Option, e)
|
||||
//
|
||||
// Note: the spec (draft-ietf-dnsop-edns-client-subnet-00) has some insane logic
|
||||
// for which netmask applies to the address. This code will parse all the
|
||||
// available bits when unpacking (up to optlen). When packing it will apply
|
||||
// SourceNetmask. If you need more advanced logic, patches welcome and good luck.
|
||||
// This code will parse all the available bits when unpacking (up to optlen).
|
||||
// When packing it will apply SourceNetmask. If you need more advanced logic,
|
||||
// patches welcome and good luck.
|
||||
type EDNS0_SUBNET struct {
|
||||
Code uint16 // Always EDNS0SUBNET
|
||||
Family uint16 // 1 for IP, 2 for IP6
|
||||
SourceNetmask uint8
|
||||
SourceScope uint8
|
||||
Address net.IP
|
||||
DraftOption bool // Set to true if using the old (0x50fa) option code
|
||||
}
|
||||
|
||||
func (e *EDNS0_SUBNET) Option() uint16 {
|
||||
if e.DraftOption {
|
||||
return EDNS0SUBNETDRAFT
|
||||
}
|
||||
return EDNS0SUBNET
|
||||
}
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET }
|
||||
|
||||
func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||
b := make([]byte, 4)
|
||||
@@ -229,6 +220,12 @@ func (e *EDNS0_SUBNET) pack() ([]byte, error) {
|
||||
b[2] = e.SourceNetmask
|
||||
b[3] = e.SourceScope
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// We might don't need to complain either
|
||||
if e.SourceNetmask != 0 {
|
||||
return nil, errors.New("dns: bad address family")
|
||||
}
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 {
|
||||
return nil, errors.New("dns: bad netmask")
|
||||
@@ -263,6 +260,13 @@ func (e *EDNS0_SUBNET) unpack(b []byte) error {
|
||||
e.SourceNetmask = b[2]
|
||||
e.SourceScope = b[3]
|
||||
switch e.Family {
|
||||
case 0:
|
||||
// "dig" sets AddressFamily to 0 if SourceNetmask is also 0
|
||||
// It's okay to accept such a packet
|
||||
if e.SourceNetmask != 0 {
|
||||
return errors.New("dns: bad address family")
|
||||
}
|
||||
e.Address = net.IPv4(0, 0, 0, 0)
|
||||
case 1:
|
||||
if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 {
|
||||
return errors.New("dns: bad netmask")
|
||||
@@ -301,7 +305,7 @@ func (e *EDNS0_SUBNET) String() (s string) {
|
||||
return
|
||||
}
|
||||
|
||||
// The Cookie EDNS0 option
|
||||
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
|
||||
//
|
||||
// o := new(dns.OPT)
|
||||
// o.Hdr.Name = "."
|
||||
@@ -332,6 +336,7 @@ func (e *EDNS0_COOKIE) pack() ([]byte, error) {
|
||||
return h, nil
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
|
||||
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
|
||||
func (e *EDNS0_COOKIE) String() string { return e.Cookie }
|
||||
@@ -353,6 +358,7 @@ type EDNS0_UL struct {
|
||||
Lease uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
|
||||
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
|
||||
|
||||
@@ -382,6 +388,7 @@ type EDNS0_LLQ struct {
|
||||
LeaseLife uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ }
|
||||
|
||||
func (e *EDNS0_LLQ) pack() ([]byte, error) {
|
||||
@@ -413,11 +420,13 @@ func (e *EDNS0_LLQ) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
|
||||
type EDNS0_DAU struct {
|
||||
Code uint16 // Always EDNS0DAU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU }
|
||||
func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
@@ -434,11 +443,13 @@ func (e *EDNS0_DAU) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_DHU struct {
|
||||
Code uint16 // Always EDNS0DHU
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU }
|
||||
func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
@@ -455,11 +466,13 @@ func (e *EDNS0_DHU) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
|
||||
type EDNS0_N3U struct {
|
||||
Code uint16 // Always EDNS0N3U
|
||||
AlgCode []uint8
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U }
|
||||
func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil }
|
||||
func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
|
||||
@@ -477,11 +490,13 @@ func (e *EDNS0_N3U) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314.
|
||||
type EDNS0_EXPIRE struct {
|
||||
Code uint16 // Always EDNS0EXPIRE
|
||||
Expire uint32
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
|
||||
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
|
||||
|
||||
@@ -520,6 +535,7 @@ type EDNS0_LOCAL struct {
|
||||
Data []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
|
||||
func (e *EDNS0_LOCAL) String() string {
|
||||
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
|
||||
@@ -543,15 +559,16 @@ func (e *EDNS0_LOCAL) unpack(b []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep
|
||||
// the TCP connection alive. See RFC 7828.
|
||||
type EDNS0_TCP_KEEPALIVE struct {
|
||||
Code uint16 // Always EDNSTCPKEEPALIVE
|
||||
Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present;
|
||||
Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order.
|
||||
}
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 {
|
||||
return EDNS0TCPKEEPALIVE
|
||||
}
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE }
|
||||
|
||||
func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) {
|
||||
if e.Timeout != 0 && e.Length != 2 {
|
||||
@@ -595,3 +612,16 @@ func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EDNS0_PADDING option is used to add padding to a request/response. The default
|
||||
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if
|
||||
// compression is applied before encryption which may break signatures.
|
||||
type EDNS0_PADDING struct {
|
||||
Padding []byte
|
||||
}
|
||||
|
||||
// Option implements the EDNS0 interface.
|
||||
func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
|
||||
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
|
||||
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
|
||||
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
|
||||
|
||||
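These edns.go changes drop the draft client-subnet option code and add EDNS0_PADDING. A sketch of attaching both options to a query; the prefix, padding length and addresses are arbitrary examples:

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	o.SetUDPSize(dns.DefaultMsgSize)

	subnet := &dns.EDNS0_SUBNET{
		Code:          dns.EDNS0SUBNET,
		Family:        1, // IPv4
		SourceNetmask: 24,
		Address:       net.ParseIP("192.0.2.0").To4(),
	}
	padding := &dns.EDNS0_PADDING{Padding: make([]byte, 16)} // 16 zero bytes of padding

	o.Option = append(o.Option, subnet, padding)

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.Extra = append(m.Extra, o)
	fmt.Println(m)
}
```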
23  vendor/github.com/miekg/dns/fuzz.go  generated  vendored  Normal file
@@ -0,0 +1,23 @@
// +build fuzz

package dns

func Fuzz(data []byte) int {
	msg := new(Msg)

	if err := msg.Unpack(data); err != nil {
		return 0
	}
	if _, err := msg.Pack(); err != nil {
		return 0
	}

	return 1
}

func FuzzNewRR(data []byte) int {
	if _, err := NewRR(string(data)); err != nil {
		return 0
	}
	return 1
}
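The new fuzz.go entry points (presumably for go-fuzz, given the `Fuzz(data []byte) int` shape and the fuzz build tag) only check that arbitrary input either fails cleanly or survives an Unpack/Pack round trip. The same invariant can be poked at by hand; the wire bytes below are a hand-assembled query for example.org, used purely as illustration:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// ID 0x0001, RD set, one question: example.org. IN A
	raw, _ := hex.DecodeString("000101000001000000000000076578616d706c65036f72670000010001")

	msg := new(dns.Msg)
	if err := msg.Unpack(raw); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	if _, err := msg.Pack(); err != nil {
		fmt.Println("re-pack failed:", err)
		return
	}
	fmt.Println("round-trip ok:", msg.Question)
}
```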
43  vendor/github.com/miekg/dns/labels.go  generated  vendored
@@ -42,7 +42,7 @@ func SplitDomainName(s string) (labels []string) {

// CompareDomainName compares the names s1 and s2 and
// returns how many labels they have in common starting from the *right*.
// The comparison stops at the first inequality. The names are not downcased
// The comparison stops at the first inequality. The names are downcased
// before the comparison.
//
// www.miek.nl. and miek.nl. have two labels in common: miek and nl
@@ -50,23 +50,21 @@ func SplitDomainName(s string) (labels []string) {
//
// s1 and s2 must be syntactically valid domain names.
func CompareDomainName(s1, s2 string) (n int) {
	s1 = Fqdn(s1)
	s2 = Fqdn(s2)
	// the first check: root label
	if s1 == "." || s2 == "." {
		return 0
	}

	l1 := Split(s1)
	l2 := Split(s2)

	// the first check: root label
	if l1 == nil || l2 == nil {
		return
	}

	j1 := len(l1) - 1 // end
	i1 := len(l1) - 2 // start
	j2 := len(l2) - 1
	i2 := len(l2) - 2
	// the second check can be done here: last/only label
	// before we fall through into the for-loop below
	if s1[l1[j1]:] == s2[l2[j2]:] {
	if equal(s1[l1[j1]:], s2[l2[j2]:]) {
		n++
	} else {
		return
@@ -75,7 +73,7 @@ func CompareDomainName(s1, s2 string) (n int) {
		if i1 < 0 || i2 < 0 {
			break
		}
		if s1[l1[i1]:l1[j1]] == s2[l2[i2]:l2[j2]] {
		if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
			n++
		} else {
			break
@@ -166,3 +164,28 @@ func PrevLabel(s string, n int) (i int, start bool) {
	}
	return lab[len(lab)-n], false
}

// equal compares a and b while ignoring case. It returns true when equal otherwise false.
func equal(a, b string) bool {
	// might be lifted into API function.
	la := len(a)
	lb := len(b)
	if la != lb {
		return false
	}

	for i := la - 1; i >= 0; i-- {
		ai := a[i]
		bi := b[i]
		if ai >= 'A' && ai <= 'Z' {
			ai |= ('a' - 'A')
		}
		if bi >= 'A' && bi <= 'Z' {
			bi |= ('a' - 'A')
		}
		if ai != bi {
			return false
		}
	}
	return true
}
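CompareDomainName now compares labels case-insensitively through the new equal helper. A tiny check of the changed behaviour:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Differing label case no longer breaks the common-suffix count.
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "MIEK.NL."))     // 2
	fmt.Println(dns.CompareDomainName("www.miek.nl.", "www.bind.nl.")) // 1
}
```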
306  vendor/github.com/miekg/dns/msg.go  generated  vendored
@@ -9,42 +9,36 @@
|
||||
package dns
|
||||
|
||||
//go:generate go run msg_generate.go
|
||||
//go:generate go run compress_generate.go
|
||||
|
||||
import (
|
||||
crand "crypto/rand"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Initialize default math/rand source using crypto/rand to provide better
|
||||
// security without the performance trade-off.
|
||||
buf := make([]byte, 8)
|
||||
_, err := crand.Read(buf)
|
||||
if err != nil {
|
||||
// Failed to read from cryptographic source, fallback to default initial
|
||||
// seed (1) by returning early
|
||||
return
|
||||
}
|
||||
seed := binary.BigEndian.Uint64(buf)
|
||||
rand.Seed(int64(seed))
|
||||
}
|
||||
|
||||
const maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
|
||||
const (
|
||||
maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer
|
||||
maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4
|
||||
)
|
||||
|
||||
// Errors defined in this package.
|
||||
var (
|
||||
ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm.
|
||||
ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication.
|
||||
ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used it too small for the message.
|
||||
ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being uses before it is initialized.
|
||||
ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message.
|
||||
ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized.
|
||||
ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ...
|
||||
ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot.
|
||||
ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID.
|
||||
ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid.
|
||||
ErrKey error = &Error{err: "bad key"}
|
||||
ErrKeySize error = &Error{err: "bad key size"}
|
||||
ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)}
|
||||
ErrNoSig error = &Error{err: "no signature found"}
|
||||
ErrPrivKey error = &Error{err: "bad private key"}
|
||||
ErrRcode error = &Error{err: "bad rcode"}
|
||||
@@ -64,13 +58,47 @@ var (
|
||||
// For instance, to make it return a static value:
|
||||
//
|
||||
// dns.Id = func() uint16 { return 3 }
|
||||
var Id func() uint16 = id
|
||||
var Id = id
|
||||
|
||||
var (
|
||||
idLock sync.Mutex
|
||||
idRand *rand.Rand
|
||||
)
|
||||
|
||||
// id returns a 16 bits random number to be used as a
|
||||
// message id. The random provided should be good enough.
|
||||
func id() uint16 {
|
||||
id32 := rand.Uint32()
|
||||
return uint16(id32)
|
||||
idLock.Lock()
|
||||
|
||||
if idRand == nil {
|
||||
// This (partially) works around
|
||||
// https://github.com/golang/go/issues/11833 by only
|
||||
// seeding idRand upon the first call to id.
|
||||
|
||||
var seed int64
|
||||
var buf [8]byte
|
||||
|
||||
if _, err := crand.Read(buf[:]); err == nil {
|
||||
seed = int64(binary.LittleEndian.Uint64(buf[:]))
|
||||
} else {
|
||||
seed = rand.Int63()
|
||||
}
|
||||
|
||||
idRand = rand.New(rand.NewSource(seed))
|
||||
}
|
||||
|
||||
// The call to idRand.Uint32 must be within the
|
||||
// mutex lock because *rand.Rand is not safe for
|
||||
// concurrent use.
|
||||
//
|
||||
// There is no added performance overhead to calling
|
||||
// idRand.Uint32 inside a mutex lock over just
|
||||
// calling rand.Uint32 as the global math/rand rng
|
||||
// is internally protected by a sync.Mutex.
|
||||
id := uint16(idRand.Uint32())
|
||||
|
||||
idLock.Unlock()
|
||||
return id
|
||||
}
|
||||
|
||||
// MsgHdr is a a manually-unpacked version of (id, bits).
|
||||
@@ -241,7 +269,9 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
|
||||
bsFresh = true
|
||||
}
|
||||
// Don't try to compress '.'
|
||||
if compress && roBs[begin:] != "." {
|
||||
// We should only compress when compress it true, but we should also still pick
|
||||
// up names that can be used for *future* compression(s).
|
||||
if compression != nil && roBs[begin:] != "." {
|
||||
if p, ok := compression[roBs[begin:]]; !ok {
|
||||
// Only offsets smaller than this can be used.
|
||||
if offset < maxCompressionOffset {
|
||||
@@ -305,6 +335,7 @@ func UnpackDomainName(msg []byte, off int) (string, int, error) {
|
||||
s := make([]byte, 0, 64)
|
||||
off1 := 0
|
||||
lenmsg := len(msg)
|
||||
maxLen := maxDomainNameWireOctets
|
||||
ptr := 0 // number of pointers followed
|
||||
Loop:
|
||||
for {
|
||||
@@ -329,8 +360,10 @@ Loop:
|
||||
fallthrough
|
||||
case '"', '\\':
|
||||
s = append(s, '\\', b)
|
||||
// presentation-format \X escapes add an extra byte
|
||||
maxLen++
|
||||
default:
|
||||
if b < 32 || b >= 127 { // unprintable use \DDD
|
||||
if b < 32 || b >= 127 { // unprintable, use \DDD
|
||||
var buf [3]byte
|
||||
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
|
||||
s = append(s, '\\')
|
||||
@@ -340,6 +373,8 @@ Loop:
|
||||
for _, r := range bufs {
|
||||
s = append(s, r)
|
||||
}
|
||||
// presentation-format \DDD escapes add 3 extra bytes
|
||||
maxLen += 3
|
||||
} else {
|
||||
s = append(s, b)
|
||||
}
|
||||
@@ -364,6 +399,9 @@ Loop:
|
||||
if ptr++; ptr > 10 {
|
||||
return "", lenmsg, &Error{err: "too many compression pointers"}
|
||||
}
|
||||
// pointer should guarantee that it advances and points forwards at least
|
||||
// but the condition on previous three lines guarantees that it's
|
||||
// at least loop-free
|
||||
off = (c^0xC0)<<8 | int(c1)
|
||||
default:
|
||||
// 0x80 and 0x40 are reserved
|
||||
@@ -375,6 +413,9 @@ Loop:
|
||||
}
|
||||
if len(s) == 0 {
|
||||
s = []byte(".")
|
||||
} else if len(s) >= maxLen {
|
||||
// error if the name is too long, but don't throw it away
|
||||
return string(s), lenmsg, ErrLongDomain
|
||||
}
|
||||
return string(s), off1, nil
|
||||
}
|
||||
@@ -571,8 +612,8 @@ func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
|
||||
// If we cannot unpack the whole array, then it will return nil
|
||||
func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
|
||||
var r RR
|
||||
// Optimistically make dst be the length that was sent
|
||||
dst := make([]RR, 0, l)
|
||||
// Don't pre-allocate, l may be under attacker control
|
||||
var dst []RR
|
||||
for i := 0; i < l; i++ {
|
||||
off1 := off
|
||||
r, off, err = UnpackRR(msg, off)
|
||||
@@ -710,12 +751,10 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
|
||||
|
||||
// We need the uncompressed length here, because we first pack it and then compress it.
|
||||
msg = buf
|
||||
compress := dns.Compress
|
||||
dns.Compress = false
|
||||
if packLen := dns.Len() + 1; len(msg) < packLen {
|
||||
uncompressedLen := compressedLen(dns, false)
|
||||
if packLen := uncompressedLen + 1; len(msg) < packLen {
|
||||
msg = make([]byte, packLen)
|
||||
}
|
||||
dns.Compress = compress
|
||||
|
||||
// Pack it in: header and then the pieces.
|
||||
off := 0
|
||||
@@ -772,13 +811,19 @@ func (dns *Msg) Unpack(msg []byte) (err error) {
|
||||
dns.CheckingDisabled = (dh.Bits & _CD) != 0
|
||||
dns.Rcode = int(dh.Bits & 0xF)
|
||||
|
||||
// If we are at the end of the message we should return *just* the
|
||||
// header. This can still be useful to the caller. 9.9.9.9 sends these
|
||||
// when responding with REFUSED for instance.
|
||||
if off == len(msg) {
|
||||
return ErrTruncated
|
||||
// reset sections before returning
|
||||
dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Optimistically use the count given to us in the header
|
||||
dns.Question = make([]Question, 0, int(dh.Qdcount))
|
||||
|
||||
// Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are
|
||||
// attacker controlled. This means we can't use them to pre-allocate
|
||||
// slices.
|
||||
dns.Question = nil
|
||||
for i := 0; i < int(dh.Qdcount); i++ {
|
||||
off1 := off
|
||||
var q Question
|
||||
@@ -868,72 +913,62 @@ func (dns *Msg) String() string {
|
||||
// If dns.Compress is true compression it is taken into account. Len()
|
||||
// is provided to be a faster way to get the size of the resulting packet,
|
||||
// than packing it, measuring the size and discarding the buffer.
|
||||
func (dns *Msg) Len() int {
|
||||
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
|
||||
|
||||
// compressedLen returns the message length when in compressed wire format
|
||||
// when compress is true, otherwise the uncompressed length is returned.
|
||||
func compressedLen(dns *Msg, compress bool) int {
|
||||
// We always return one more than needed.
|
||||
l := 12 // Message header is always 12 bytes
|
||||
var compression map[string]int
|
||||
if dns.Compress {
|
||||
compression = make(map[string]int)
|
||||
}
|
||||
for i := 0; i < len(dns.Question); i++ {
|
||||
l += dns.Question[i].len()
|
||||
if dns.Compress {
|
||||
compressionLenHelper(compression, dns.Question[i].Name)
|
||||
if compress {
|
||||
compression := map[string]int{}
|
||||
for _, r := range dns.Question {
|
||||
l += r.len()
|
||||
compressionLenHelper(compression, r.Name)
|
||||
}
|
||||
l += compressionLenSlice(compression, dns.Answer)
|
||||
l += compressionLenSlice(compression, dns.Ns)
|
||||
l += compressionLenSlice(compression, dns.Extra)
|
||||
} else {
|
||||
for _, r := range dns.Question {
|
||||
l += r.len()
|
||||
}
|
||||
for _, r := range dns.Answer {
|
||||
if r != nil {
|
||||
l += r.len()
|
||||
}
|
||||
}
|
||||
for _, r := range dns.Ns {
|
||||
if r != nil {
|
||||
l += r.len()
|
||||
}
|
||||
}
|
||||
for _, r := range dns.Extra {
|
||||
if r != nil {
|
||||
l += r.len()
|
||||
}
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(dns.Answer); i++ {
|
||||
if dns.Answer[i] == nil {
|
||||
return l
|
||||
}
|
||||
|
||||
func compressionLenSlice(c map[string]int, rs []RR) int {
|
||||
var l int
|
||||
for _, r := range rs {
|
||||
if r == nil {
|
||||
continue
|
||||
}
|
||||
l += dns.Answer[i].len()
|
||||
if dns.Compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Answer[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelper(compression, dns.Answer[i].Header().Name)
|
||||
k, ok = compressionLenSearchType(compression, dns.Answer[i])
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelperType(compression, dns.Answer[i])
|
||||
l += r.len()
|
||||
k, ok := compressionLenSearch(c, r.Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(dns.Ns); i++ {
|
||||
if dns.Ns[i] == nil {
|
||||
continue
|
||||
}
|
||||
l += dns.Ns[i].len()
|
||||
if dns.Compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Ns[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelper(compression, dns.Ns[i].Header().Name)
|
||||
k, ok = compressionLenSearchType(compression, dns.Ns[i])
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelperType(compression, dns.Ns[i])
|
||||
}
|
||||
}
|
||||
for i := 0; i < len(dns.Extra); i++ {
|
||||
if dns.Extra[i] == nil {
|
||||
continue
|
||||
}
|
||||
l += dns.Extra[i].len()
|
||||
if dns.Compress {
|
||||
k, ok := compressionLenSearch(compression, dns.Extra[i].Header().Name)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelper(compression, dns.Extra[i].Header().Name)
|
||||
k, ok = compressionLenSearchType(compression, dns.Extra[i])
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelperType(compression, dns.Extra[i])
|
||||
compressionLenHelper(c, r.Header().Name)
|
||||
k, ok = compressionLenSearchType(c, r)
|
||||
if ok {
|
||||
l += 1 - k
|
||||
}
|
||||
compressionLenHelperType(c, r)
|
||||
}
|
||||
return l
|
||||
}
|
||||
@@ -970,97 +1005,6 @@ func compressionLenSearch(c map[string]int, s string) (int, bool) {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// TODO(miek): should add all types, because the all can be *used* for compression. Autogenerate from msg_generate and put in zmsg.go
|
||||
func compressionLenHelperType(c map[string]int, r RR) {
|
||||
switch x := r.(type) {
|
||||
case *NS:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
case *MX:
|
||||
compressionLenHelper(c, x.Mx)
|
||||
case *CNAME:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *PTR:
|
||||
compressionLenHelper(c, x.Ptr)
|
||||
case *SOA:
|
||||
compressionLenHelper(c, x.Ns)
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
case *MB:
|
||||
compressionLenHelper(c, x.Mb)
|
||||
case *MG:
|
||||
compressionLenHelper(c, x.Mg)
|
||||
case *MR:
|
||||
compressionLenHelper(c, x.Mr)
|
||||
case *MF:
|
||||
compressionLenHelper(c, x.Mf)
|
||||
case *MD:
|
||||
compressionLenHelper(c, x.Md)
|
||||
case *RT:
|
||||
compressionLenHelper(c, x.Host)
|
||||
case *RP:
|
||||
compressionLenHelper(c, x.Mbox)
|
||||
compressionLenHelper(c, x.Txt)
|
||||
case *MINFO:
|
||||
compressionLenHelper(c, x.Rmail)
|
||||
compressionLenHelper(c, x.Email)
|
||||
case *AFSDB:
|
||||
compressionLenHelper(c, x.Hostname)
|
||||
case *SRV:
|
||||
compressionLenHelper(c, x.Target)
|
||||
case *NAPTR:
|
||||
compressionLenHelper(c, x.Replacement)
|
||||
case *RRSIG:
|
||||
compressionLenHelper(c, x.SignerName)
|
||||
case *NSEC:
|
||||
compressionLenHelper(c, x.NextDomain)
|
||||
// HIP?
|
||||
}
|
||||
}
|
||||
|
||||
// Only search on compressing these types.
|
||||
func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
|
||||
switch x := r.(type) {
|
||||
case *NS:
|
||||
return compressionLenSearch(c, x.Ns)
|
||||
case *MX:
|
||||
return compressionLenSearch(c, x.Mx)
|
||||
case *CNAME:
|
||||
return compressionLenSearch(c, x.Target)
|
||||
case *DNAME:
|
||||
return compressionLenSearch(c, x.Target)
|
||||
case *PTR:
|
||||
return compressionLenSearch(c, x.Ptr)
|
||||
case *SOA:
|
||||
k, ok := compressionLenSearch(c, x.Ns)
|
||||
k1, ok1 := compressionLenSearch(c, x.Mbox)
|
||||
if !ok && !ok1 {
|
||||
return 0, false
|
||||
}
|
||||
return k + k1, true
|
||||
case *MB:
|
||||
return compressionLenSearch(c, x.Mb)
|
||||
case *MG:
|
||||
return compressionLenSearch(c, x.Mg)
|
||||
case *MR:
|
||||
return compressionLenSearch(c, x.Mr)
|
||||
case *MF:
|
||||
return compressionLenSearch(c, x.Mf)
|
||||
case *MD:
|
||||
return compressionLenSearch(c, x.Md)
|
||||
case *RT:
|
||||
return compressionLenSearch(c, x.Host)
|
||||
case *MINFO:
|
||||
k, ok := compressionLenSearch(c, x.Rmail)
|
||||
k1, ok1 := compressionLenSearch(c, x.Email)
|
||||
if !ok && !ok1 {
|
||||
return 0, false
|
||||
}
|
||||
return k + k1, true
|
||||
case *AFSDB:
|
||||
return compressionLenSearch(c, x.Hostname)
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// Copy returns a new RR which is a deep-copy of r.
|
||||
func Copy(r RR) RR { r1 := r.copy(); return r1 }
|
||||
|
||||
|
||||
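The compression-length helpers above estimate how much owner-name compression will save when a message is packed. For reference, a minimal sketch of the public knobs this logic feeds into in the vendored miekg/dns API; the query name and record are arbitrary example data:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("www.example.org.", dns.TypeMX)
	m.Compress = true // owner names in the packed message may be compressed

	rr, err := dns.NewRR("www.example.org. 3600 IN MX 10 mail.example.org.")
	if err != nil {
		log.Fatal(err)
	}
	m.Answer = append(m.Answer, rr)

	fmt.Println(m.Len()) // estimated wire length, taking compression into account
	wire, err := m.Pack()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(wire)) // actual packed length
}
```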
vendor/github.com/miekg/dns/msg_generate.go (generated, vendored): 16 changed lines
@@ -18,8 +18,7 @@ import (
|
||||
)
|
||||
|
||||
var packageHdr = `
|
||||
// *** DO NOT MODIFY ***
|
||||
// AUTOGENERATED BY go generate from msg_generate.go
|
||||
// Code generated by "go run msg_generate.go"; DO NOT EDIT.
|
||||
|
||||
package dns
|
||||
|
||||
@@ -139,8 +138,17 @@ return off, err
|
||||
case st.Tag(i) == `dns:"base64"`:
|
||||
o("off, err = packStringBase64(rr.%s, msg, off)\n")
|
||||
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): // Hack to fix empty salt length for NSEC3
|
||||
o("if rr.%s == \"-\" { /* do nothing, empty salt */ }\n")
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`):
|
||||
// directly write instead of using o() so we get the error check in the correct place
|
||||
field := st.Field(i).Name()
|
||||
fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty
|
||||
if rr.%s != "-" {
|
||||
off, err = packStringHex(rr.%s, msg, off)
|
||||
if err != nil {
|
||||
return off, err
|
||||
}
|
||||
}
|
||||
`, field, field)
|
||||
continue
|
||||
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex
|
||||
fallthrough
|
||||
|
||||
vendor/github.com/miekg/dns/msg_helpers.go (generated, vendored): 19 changed lines
@@ -96,7 +96,7 @@ func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte,
|
||||
return hdr, len(msg), msg, err
|
||||
}
|
||||
msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength)
|
||||
return hdr, off, msg, nil
|
||||
return hdr, off, msg, err
|
||||
}
|
||||
|
||||
// pack packs an RR header, returning the offset to the end of the header.
|
||||
@@ -142,6 +142,11 @@ func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []b
|
||||
}
|
||||
|
||||
func fromBase32(s []byte) (buf []byte, err error) {
|
||||
for i, b := range s {
|
||||
if b >= 'a' && b <= 'z' {
|
||||
s[i] = b - 32
|
||||
}
|
||||
}
|
||||
buflen := base32.HexEncoding.DecodedLen(len(s))
|
||||
buf = make([]byte, buflen)
|
||||
n, err := base32.HexEncoding.Decode(buf, s)
|
||||
@@ -401,16 +406,13 @@ Option:
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0SUBNET, EDNS0SUBNETDRAFT:
|
||||
case EDNS0SUBNET:
|
||||
e := new(EDNS0_SUBNET)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
if code == EDNS0SUBNETDRAFT {
|
||||
e.DraftOption = true
|
||||
}
|
||||
case EDNS0COOKIE:
|
||||
e := new(EDNS0_COOKIE)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
@@ -453,6 +455,13 @@ Option:
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
case EDNS0PADDING:
|
||||
e := new(EDNS0_PADDING)
|
||||
if err := e.unpack(msg[off : off+int(optlen)]); err != nil {
|
||||
return nil, len(msg), err
|
||||
}
|
||||
edns = append(edns, e)
|
||||
off += int(optlen)
|
||||
default:
|
||||
e := new(EDNS0_LOCAL)
|
||||
e.Code = code
|
||||
|
||||
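The hunks above change how EDNS0 options are unpacked, dropping the draft client-subnet code point and adding the padding option. A hedged sketch of how these option types are attached to a query with the vendored miekg/dns API; the address and padding size are illustrative assumptions:

```go
package main

import (
	"fmt"
	"net"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	m.SetEdns0(4096, true) // adds an OPT RR to the additional section
	opt := m.IsEdns0()

	// EDNS0 client subnet option (only the standard code point after this diff).
	subnet := &dns.EDNS0_SUBNET{
		Code:          dns.EDNS0SUBNET,
		Family:        1, // IPv4
		SourceNetmask: 24,
		Address:       net.ParseIP("192.0.2.0").To4(),
	}
	// EDNS0 padding option, whose unpacking is added in this diff.
	padding := &dns.EDNS0_PADDING{Padding: make([]byte, 16)}

	opt.Option = append(opt.Option, subnet, padding)
	fmt.Println(m)
}
```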
vendor/github.com/miekg/dns/nsecx.go (generated, vendored): 76 changed lines
@@ -48,62 +48,50 @@ func HashName(label string, ha uint8, iter uint16, salt string) string {
|
||||
return toBase32(nsec3)
|
||||
}
|
||||
|
||||
// Denialer is an interface that should be implemented by types that are used to denial
|
||||
// answers in DNSSEC.
|
||||
type Denialer interface {
|
||||
// Cover will check if the (unhashed) name is being covered by this NSEC or NSEC3.
|
||||
Cover(name string) bool
|
||||
// Match will check if the ownername matches the (unhashed) name for this NSEC3 or NSEC3.
|
||||
Match(name string) bool
|
||||
}
|
||||
|
||||
// Cover implements the Denialer interface.
|
||||
func (rr *NSEC) Cover(name string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Match implements the Denialer interface.
|
||||
func (rr *NSEC) Match(name string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Cover implements the Denialer interface.
|
||||
// Cover returns true if a name is covered by the NSEC3 record
|
||||
func (rr *NSEC3) Cover(name string) bool {
|
||||
// FIXME(miek): check if the zones match
|
||||
// FIXME(miek): check if we're not dealing with parent nsec3
|
||||
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
labels := Split(rr.Hdr.Name)
|
||||
if len(labels) < 2 {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the dot
|
||||
if hash == rr.NextDomain {
|
||||
return false // empty interval
|
||||
}
|
||||
if hash > rr.NextDomain { // last name, points to apex
|
||||
// hname > hash
|
||||
// hname > rr.NextDomain
|
||||
// TODO(miek)
|
||||
}
|
||||
if hname <= hash {
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
if hname >= rr.NextDomain {
|
||||
|
||||
nextHash := rr.NextDomain
|
||||
if ownerHash == nextHash { // empty interval
|
||||
return false
|
||||
}
|
||||
return true
|
||||
if ownerHash > nextHash { // end of zone
|
||||
if nameHash > ownerHash { // covered since there is nothing after ownerHash
|
||||
return true
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before beginning of zone it is covered
|
||||
}
|
||||
if nameHash < ownerHash { // nameHash is before ownerHash, not covered
|
||||
return false
|
||||
}
|
||||
return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash)
|
||||
}
|
||||
|
||||
// Match implements the Denialer interface.
|
||||
// Match returns true if a name matches the NSEC3 record
|
||||
func (rr *NSEC3) Match(name string) bool {
|
||||
// FIXME(miek): Check if we are in the same zone
|
||||
hname := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
labels := Split(rr.Hdr.Name)
|
||||
if len(labels) < 2 {
|
||||
nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt)
|
||||
owner := strings.ToUpper(rr.Hdr.Name)
|
||||
labelIndices := Split(owner)
|
||||
if len(labelIndices) < 2 {
|
||||
return false
|
||||
}
|
||||
hash := strings.ToUpper(rr.Hdr.Name[labels[0] : labels[1]-1]) // -1 to remove the .
|
||||
if hash == hname {
|
||||
ownerHash := owner[:labelIndices[1]-1]
|
||||
ownerZone := owner[labelIndices[1]:]
|
||||
if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone
|
||||
return false
|
||||
}
|
||||
if ownerHash == nameHash {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
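The rewritten Cover and Match above decide NSEC3 denial by comparing the hashed query name against the record's owner hash and NextDomain. A small sketch of how this vendored API is exercised; the owner label, salt, and next-hash values are made-up placeholders:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	n := &dns.NSEC3{
		Hdr:        dns.RR_Header{Name: "39p91242oslggdkfk4av4qlf5ab221r0.example.", Rrtype: dns.TypeNSEC3, Class: dns.ClassINET, Ttl: 3600},
		Hash:       dns.SHA1,
		Flags:      1,
		Iterations: 12,
		SaltLength: 4,
		Salt:       "aabbccdd",
		HashLength: 20,
		NextDomain: "39P99DCGG0MDLARTCRMCF6OFLLUL7PR6",
		TypeBitMap: []uint16{dns.TypeNS, dns.TypeDS, dns.TypeRRSIG},
	}

	// Hash a query name the same way Cover and Match do internally.
	fmt.Println(dns.HashName("a.example.", n.Hash, n.Iterations, n.Salt))

	// Cover reports whether the hashed name falls between the owner hash and
	// NextDomain; Match reports whether it equals the owner hash.
	fmt.Println(n.Cover("a.example."), n.Match("a.example."))
}
```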
vendor/github.com/miekg/dns/reverse.go (generated, vendored): 4 changed lines
@@ -6,10 +6,10 @@ var StringToType = reverseInt16(TypeToString)
// StringToClass is the reverse of ClassToString, needed for string parsing.
var StringToClass = reverseInt16(ClassToString)

// Map of opcodes strings.
// StringToOpcode is a map of opcodes to strings.
var StringToOpcode = reverseInt(OpcodeToString)

// Map of rcodes strings.
// StringToRcode is a map of rcodes to strings.
var StringToRcode = reverseInt(RcodeToString)

// Reverse a map
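A quick illustration of the reverse lookup maps documented above; all of these are exported by the vendored package, and the lookups shown are assumed example values:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.StringToType["AAAA"])      // 28
	fmt.Println(dns.TypeToString[dns.TypeMX])  // MX
	fmt.Println(dns.StringToOpcode["QUERY"])   // 0
	fmt.Println(dns.StringToRcode["NXDOMAIN"]) // 3
}
```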
vendor/github.com/miekg/dns/sanitize.go (generated, vendored): 2 changed lines
@@ -3,7 +3,7 @@ package dns
// Dedup removes identical RRs from rrs. It preserves the original ordering.
// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies
// rrs.
// m is used to store the RRs temporay. If it is nil a new map will be allocated.
// m is used to store the RRs temporary. If it is nil a new map will be allocated.
func Dedup(rrs []RR, m map[string]RR) []RR {
	if m == nil {
		m = make(map[string]RR)
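To make the Dedup contract above concrete, a short hedged sketch; the records and the mustRR helper are illustrative, not part of the library:

```go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

// mustRR is a small helper for this example; it panics on parse errors.
func mustRR(s string) dns.RR {
	rr, err := dns.NewRR(s)
	if err != nil {
		panic(err)
	}
	return rr
}

func main() {
	rrs := []dns.RR{
		mustRR("example.org. 3600 IN A 192.0.2.1"),
		mustRR("example.org. 300 IN A 192.0.2.1"), // duplicate, lower TTL
		mustRR("example.org. 3600 IN A 192.0.2.2"),
	}
	// Two records remain; the kept duplicate carries the lowest TTL (300).
	for _, rr := range dns.Dedup(rrs, nil) {
		fmt.Println(rr)
	}
}
```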
vendor/github.com/miekg/dns/scan.go (generated, vendored): 244 changed lines
@@ -1,23 +1,14 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type debugging bool
|
||||
|
||||
const debug debugging = false
|
||||
|
||||
func (d debugging) Printf(format string, args ...interface{}) {
|
||||
if d {
|
||||
log.Printf(format, args...)
|
||||
}
|
||||
}
|
||||
|
||||
const maxTok = 2048 // Largest token we can return.
|
||||
const maxUint16 = 1<<16 - 1
|
||||
|
||||
@@ -38,7 +29,7 @@ const (
|
||||
zOwner
|
||||
zClass
|
||||
zDirOrigin // $ORIGIN
|
||||
zDirTtl // $TTL
|
||||
zDirTTL // $TTL
|
||||
zDirInclude // $INCLUDE
|
||||
zDirGenerate // $GENERATE
|
||||
|
||||
@@ -51,13 +42,13 @@ const (
|
||||
zExpectAny // Expect rrtype, ttl or class
|
||||
zExpectAnyNoClass // Expect rrtype or ttl
|
||||
zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS
|
||||
zExpectAnyNoTtl // Expect rrtype or class
|
||||
zExpectAnyNoTtlBl // Whitespace after _EXPECT_ANY_NOTTL
|
||||
zExpectAnyNoTTL // Expect rrtype or class
|
||||
zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL
|
||||
zExpectRrtype // Expect rrtype
|
||||
zExpectRrtypeBl // Whitespace BEFORE rrtype
|
||||
zExpectRdata // The first element of the rdata
|
||||
zExpectDirTtlBl // Space after directive $TTL
|
||||
zExpectDirTtl // Directive $TTL
|
||||
zExpectDirTTLBl // Space after directive $TTL
|
||||
zExpectDirTTL // Directive $TTL
|
||||
zExpectDirOriginBl // Space after directive $ORIGIN
|
||||
zExpectDirOrigin // Directive $ORIGIN
|
||||
zExpectDirIncludeBl // Space after directive $INCLUDE
|
||||
@@ -105,6 +96,12 @@ type Token struct {
|
||||
Comment string
|
||||
}
|
||||
|
||||
// ttlState describes the state necessary to fill in an omitted RR TTL
|
||||
type ttlState struct {
|
||||
ttl uint32 // ttl is the current default TTL
|
||||
isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive
|
||||
}
|
||||
|
||||
// NewRR reads the RR contained in the string s. Only the first RR is
|
||||
// returned. If s contains no RR, return nil with no error. The class
|
||||
// defaults to IN and TTL defaults to 3600. The full zone file syntax
|
||||
@@ -120,7 +117,8 @@ func NewRR(s string) (RR, error) {
|
||||
// ReadRR reads the RR contained in q.
|
||||
// See NewRR for more documentation.
|
||||
func ReadRR(q io.Reader, filename string) (RR, error) {
|
||||
r := <-parseZoneHelper(q, ".", filename, 1)
|
||||
defttl := &ttlState{defaultTtl, false}
|
||||
r := <-parseZoneHelper(q, ".", filename, defttl, 1)
|
||||
if r == nil {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -132,10 +130,10 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
|
||||
}
|
||||
|
||||
// ParseZone reads a RFC 1035 style zonefile from r. It returns *Tokens on the
|
||||
// returned channel, which consist out the parsed RR, a potential comment or an error.
|
||||
// If there is an error the RR is nil. The string file is only used
|
||||
// returned channel, each consisting of either a parsed RR and optional comment
|
||||
// or a nil RR and an error. The string file is only used
|
||||
// in error reporting. The string origin is used as the initial origin, as
|
||||
// if the file would start with: $ORIGIN origin .
|
||||
// if the file would start with an $ORIGIN directive.
|
||||
// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported.
|
||||
// The channel t is closed by ParseZone when the end of r is reached.
|
||||
//
|
||||
@@ -157,25 +155,37 @@ func ReadRR(q io.Reader, filename string) (RR, error) {
|
||||
// The text "; this is comment" is returned in Token.Comment. Comments inside the
|
||||
// RR are discarded. Comments on a line by themselves are discarded too.
|
||||
func ParseZone(r io.Reader, origin, file string) chan *Token {
|
||||
return parseZoneHelper(r, origin, file, 10000)
|
||||
return parseZoneHelper(r, origin, file, nil, 10000)
|
||||
}
|
||||
|
||||
func parseZoneHelper(r io.Reader, origin, file string, chansize int) chan *Token {
|
||||
func parseZoneHelper(r io.Reader, origin, file string, defttl *ttlState, chansize int) chan *Token {
|
||||
t := make(chan *Token, chansize)
|
||||
go parseZone(r, origin, file, t, 0)
|
||||
go parseZone(r, origin, file, defttl, t, 0)
|
||||
return t
|
||||
}
|
||||
|
||||
func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, include int) {
|
||||
defer func() {
|
||||
if include == 0 {
|
||||
close(t)
|
||||
}
|
||||
}()
|
||||
s := scanInit(r)
|
||||
s, cancel := scanInit(r)
|
||||
c := make(chan lex)
|
||||
// Start the lexer
|
||||
go zlexer(s, c)
|
||||
|
||||
defer func() {
|
||||
cancel()
|
||||
// zlexer can send up to three tokens, the next one and possibly 2 remainders.
|
||||
// Do a non-blocking read.
|
||||
_, ok := <-c
|
||||
_, ok = <-c
|
||||
_, ok = <-c
|
||||
if !ok {
|
||||
// too bad
|
||||
}
|
||||
}()
|
||||
// 6 possible beginnings of a line, _ is a space
|
||||
// 0. zRRTYPE -> all omitted until the rrtype
|
||||
// 1. zOwner _ zRrtype -> class/ttl omitted
|
||||
@@ -186,18 +196,16 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
// After detecting these, we know the zRrtype so we can jump to functions
|
||||
// handling the rdata for each of these types.
|
||||
|
||||
if origin == "" {
|
||||
origin = "."
|
||||
}
|
||||
origin = Fqdn(origin)
|
||||
if _, ok := IsDomainName(origin); !ok {
|
||||
t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
|
||||
return
|
||||
if origin != "" {
|
||||
origin = Fqdn(origin)
|
||||
if _, ok := IsDomainName(origin); !ok {
|
||||
t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
st := zExpectOwnerDir // initial state
|
||||
var h RR_Header
|
||||
var defttl uint32 = defaultTtl
|
||||
var prevName string
|
||||
for l := range c {
|
||||
// Lexer spotted an error already
|
||||
@@ -209,31 +217,25 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
switch st {
|
||||
case zExpectOwnerDir:
|
||||
// We can also expect a directive, like $TTL or $ORIGIN
|
||||
h.Ttl = defttl
|
||||
if defttl != nil {
|
||||
h.Ttl = defttl.ttl
|
||||
}
|
||||
h.Class = ClassINET
|
||||
switch l.value {
|
||||
case zNewline:
|
||||
st = zExpectOwnerDir
|
||||
case zOwner:
|
||||
h.Name = l.token
|
||||
if l.token[0] == '@' {
|
||||
h.Name = origin
|
||||
prevName = h.Name
|
||||
st = zExpectOwnerBl
|
||||
break
|
||||
}
|
||||
if h.Name[l.length-1] != '.' {
|
||||
h.Name = appendOrigin(h.Name, origin)
|
||||
}
|
||||
_, ok := IsDomainName(l.token)
|
||||
name, ok := toAbsoluteName(l.token, origin)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "bad owner name", l}}
|
||||
return
|
||||
}
|
||||
h.Name = name
|
||||
prevName = h.Name
|
||||
st = zExpectOwnerBl
|
||||
case zDirTtl:
|
||||
st = zExpectDirTtlBl
|
||||
case zDirTTL:
|
||||
st = zExpectDirTTLBl
|
||||
case zDirOrigin:
|
||||
st = zExpectDirOriginBl
|
||||
case zDirInclude:
|
||||
@@ -252,15 +254,16 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
// Discard, can happen when there is nothing on the
|
||||
// line except the RR type
|
||||
case zString:
|
||||
ttl, ok := stringToTtl(l.token)
|
||||
ttl, ok := stringToTTL(l.token)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
|
||||
return
|
||||
}
|
||||
h.Ttl = ttl
|
||||
// Don't about the defttl, we should take the $TTL value
|
||||
// defttl = ttl
|
||||
st = zExpectAnyNoTtlBl
|
||||
if defttl == nil || !defttl.isByDirective {
|
||||
defttl = &ttlState{ttl, false}
|
||||
}
|
||||
st = zExpectAnyNoTTLBl
|
||||
|
||||
default:
|
||||
t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}}
|
||||
@@ -278,25 +281,16 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
return
|
||||
}
|
||||
neworigin := origin // There may be optionally a new origin set after the filename, if not use current one
|
||||
l := <-c
|
||||
switch l.value {
|
||||
switch l := <-c; l.value {
|
||||
case zBlank:
|
||||
l := <-c
|
||||
if l.value == zString {
|
||||
if _, ok := IsDomainName(l.token); !ok || l.length == 0 || l.err {
|
||||
name, ok := toAbsoluteName(l.token, origin)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
|
||||
return
|
||||
}
|
||||
// a new origin is specified.
|
||||
if l.token[l.length-1] != '.' {
|
||||
if origin != "." { // Prevent .. endings
|
||||
neworigin = l.token + "." + origin
|
||||
} else {
|
||||
neworigin = l.token + origin
|
||||
}
|
||||
} else {
|
||||
neworigin = l.token
|
||||
}
|
||||
neworigin = name
|
||||
}
|
||||
case zNewline, zEOF:
|
||||
// Ok
|
||||
@@ -305,24 +299,32 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
return
|
||||
}
|
||||
// Start with the new file
|
||||
r1, e1 := os.Open(l.token)
|
||||
includePath := l.token
|
||||
if !filepath.IsAbs(includePath) {
|
||||
includePath = filepath.Join(filepath.Dir(f), includePath)
|
||||
}
|
||||
r1, e1 := os.Open(includePath)
|
||||
if e1 != nil {
|
||||
t <- &Token{Error: &ParseError{f, "failed to open `" + l.token + "'", l}}
|
||||
msg := fmt.Sprintf("failed to open `%s'", l.token)
|
||||
if !filepath.IsAbs(l.token) {
|
||||
msg += fmt.Sprintf(" as `%s'", includePath)
|
||||
}
|
||||
t <- &Token{Error: &ParseError{f, msg, l}}
|
||||
return
|
||||
}
|
||||
if include+1 > 7 {
|
||||
t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}}
|
||||
return
|
||||
}
|
||||
parseZone(r1, l.token, neworigin, t, include+1)
|
||||
parseZone(r1, neworigin, includePath, defttl, t, include+1)
|
||||
st = zExpectOwnerDir
|
||||
case zExpectDirTtlBl:
|
||||
case zExpectDirTTLBl:
|
||||
if l.value != zBlank {
|
||||
t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}}
|
||||
return
|
||||
}
|
||||
st = zExpectDirTtl
|
||||
case zExpectDirTtl:
|
||||
st = zExpectDirTTL
|
||||
case zExpectDirTTL:
|
||||
if l.value != zString {
|
||||
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
|
||||
return
|
||||
@@ -331,12 +333,12 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
t <- &Token{Error: e}
|
||||
return
|
||||
}
|
||||
ttl, ok := stringToTtl(l.token)
|
||||
ttl, ok := stringToTTL(l.token)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}}
|
||||
return
|
||||
}
|
||||
defttl = ttl
|
||||
defttl = &ttlState{ttl, true}
|
||||
st = zExpectOwnerDir
|
||||
case zExpectDirOriginBl:
|
||||
if l.value != zBlank {
|
||||
@@ -352,19 +354,12 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
if e, _ := slurpRemainder(c, f); e != nil {
|
||||
t <- &Token{Error: e}
|
||||
}
|
||||
if _, ok := IsDomainName(l.token); !ok {
|
||||
name, ok := toAbsoluteName(l.token, origin)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "bad origin name", l}}
|
||||
return
|
||||
}
|
||||
if l.token[l.length-1] != '.' {
|
||||
if origin != "." { // Prevent .. endings
|
||||
origin = l.token + "." + origin
|
||||
} else {
|
||||
origin = l.token + origin
|
||||
}
|
||||
} else {
|
||||
origin = l.token
|
||||
}
|
||||
origin = name
|
||||
st = zExpectOwnerDir
|
||||
case zExpectDirGenerateBl:
|
||||
if l.value != zBlank {
|
||||
@@ -391,20 +386,26 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
case zExpectAny:
|
||||
switch l.value {
|
||||
case zRrtpe:
|
||||
if defttl == nil {
|
||||
t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}}
|
||||
return
|
||||
}
|
||||
h.Rrtype = l.torc
|
||||
st = zExpectRdata
|
||||
case zClass:
|
||||
h.Class = l.torc
|
||||
st = zExpectAnyNoClassBl
|
||||
case zString:
|
||||
ttl, ok := stringToTtl(l.token)
|
||||
ttl, ok := stringToTTL(l.token)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
|
||||
return
|
||||
}
|
||||
h.Ttl = ttl
|
||||
// defttl = ttl // don't set the defttl here
|
||||
st = zExpectAnyNoTtlBl
|
||||
if defttl == nil || !defttl.isByDirective {
|
||||
defttl = &ttlState{ttl, false}
|
||||
}
|
||||
st = zExpectAnyNoTTLBl
|
||||
default:
|
||||
t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}}
|
||||
return
|
||||
@@ -415,13 +416,13 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
return
|
||||
}
|
||||
st = zExpectAnyNoClass
|
||||
case zExpectAnyNoTtlBl:
|
||||
case zExpectAnyNoTTLBl:
|
||||
if l.value != zBlank {
|
||||
t <- &Token{Error: &ParseError{f, "no blank before TTL", l}}
|
||||
return
|
||||
}
|
||||
st = zExpectAnyNoTtl
|
||||
case zExpectAnyNoTtl:
|
||||
st = zExpectAnyNoTTL
|
||||
case zExpectAnyNoTTL:
|
||||
switch l.value {
|
||||
case zClass:
|
||||
h.Class = l.torc
|
||||
@@ -436,13 +437,15 @@ func parseZone(r io.Reader, origin, f string, t chan *Token, include int) {
|
||||
case zExpectAnyNoClass:
|
||||
switch l.value {
|
||||
case zString:
|
||||
ttl, ok := stringToTtl(l.token)
|
||||
ttl, ok := stringToTTL(l.token)
|
||||
if !ok {
|
||||
t <- &Token{Error: &ParseError{f, "not a TTL", l}}
|
||||
return
|
||||
}
|
||||
h.Ttl = ttl
|
||||
// defttl = ttl // don't set the def ttl anymore
|
||||
if defttl == nil || !defttl.isByDirective {
|
||||
defttl = &ttlState{ttl, false}
|
||||
}
|
||||
st = zExpectRrtypeBl
|
||||
case zRrtpe:
|
||||
h.Rrtype = l.torc
|
||||
@@ -505,14 +508,12 @@ func zlexer(s *scan, c chan lex) {
|
||||
if stri >= maxTok {
|
||||
l.token = "token length insufficient for parsing"
|
||||
l.err = true
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
return
|
||||
}
|
||||
if comi >= maxTok {
|
||||
l.token = "comment length insufficient for parsing"
|
||||
l.err = true
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
return
|
||||
}
|
||||
@@ -547,7 +548,7 @@ func zlexer(s *scan, c chan lex) {
|
||||
// escape $... start with a \ not a $, so this will work
|
||||
switch l.tokenUpper {
|
||||
case "$TTL":
|
||||
l.value = zDirTtl
|
||||
l.value = zDirTTL
|
||||
case "$ORIGIN":
|
||||
l.value = zDirOrigin
|
||||
case "$INCLUDE":
|
||||
@@ -555,7 +556,6 @@ func zlexer(s *scan, c chan lex) {
|
||||
case "$GENERATE":
|
||||
l.value = zDirGenerate
|
||||
}
|
||||
debug.Printf("[7 %+v]", l.token)
|
||||
c <- l
|
||||
} else {
|
||||
l.value = zString
|
||||
@@ -577,6 +577,7 @@ func zlexer(s *scan, c chan lex) {
|
||||
return
|
||||
}
|
||||
l.value = zRrtpe
|
||||
rrtype = true
|
||||
l.torc = t
|
||||
}
|
||||
}
|
||||
@@ -597,16 +598,14 @@ func zlexer(s *scan, c chan lex) {
|
||||
}
|
||||
}
|
||||
}
|
||||
debug.Printf("[6 %+v]", l.token)
|
||||
c <- l
|
||||
}
|
||||
stri = 0
|
||||
// I reverse space stuff here
|
||||
|
||||
if !space && !commt {
|
||||
l.value = zBlank
|
||||
l.token = " "
|
||||
l.length = 1
|
||||
debug.Printf("[5 %+v]", l.token)
|
||||
c <- l
|
||||
}
|
||||
owner = false
|
||||
@@ -629,7 +628,6 @@ func zlexer(s *scan, c chan lex) {
|
||||
l.token = string(str[:stri])
|
||||
l.tokenUpper = strings.ToUpper(l.token)
|
||||
l.length = stri
|
||||
debug.Printf("[4 %+v]", l.token)
|
||||
c <- l
|
||||
stri = 0
|
||||
}
|
||||
@@ -667,7 +665,6 @@ func zlexer(s *scan, c chan lex) {
|
||||
l.tokenUpper = l.token
|
||||
l.length = 1
|
||||
l.comment = string(com[:comi])
|
||||
debug.Printf("[3 %+v %+v]", l.token, l.comment)
|
||||
c <- l
|
||||
l.comment = ""
|
||||
comi = 0
|
||||
@@ -693,14 +690,12 @@ func zlexer(s *scan, c chan lex) {
|
||||
rrtype = true
|
||||
}
|
||||
}
|
||||
debug.Printf("[2 %+v]", l.token)
|
||||
c <- l
|
||||
}
|
||||
l.value = zNewline
|
||||
l.token = "\n"
|
||||
l.tokenUpper = l.token
|
||||
l.length = 1
|
||||
debug.Printf("[1 %+v]", l.token)
|
||||
c <- l
|
||||
stri = 0
|
||||
commt = false
|
||||
@@ -746,7 +741,6 @@ func zlexer(s *scan, c chan lex) {
|
||||
l.tokenUpper = strings.ToUpper(l.token)
|
||||
l.length = stri
|
||||
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
stri = 0
|
||||
}
|
||||
@@ -782,7 +776,6 @@ func zlexer(s *scan, c chan lex) {
|
||||
l.token = "extra closing brace"
|
||||
l.tokenUpper = l.token
|
||||
l.err = true
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
return
|
||||
}
|
||||
@@ -808,7 +801,12 @@ func zlexer(s *scan, c chan lex) {
|
||||
l.tokenUpper = strings.ToUpper(l.token)
|
||||
l.length = stri
|
||||
l.value = zString
|
||||
debug.Printf("[%+v]", l.token)
|
||||
c <- l
|
||||
}
|
||||
if brace != 0 {
|
||||
l.token = "unbalanced brace"
|
||||
l.tokenUpper = l.token
|
||||
l.err = true
|
||||
c <- l
|
||||
}
|
||||
}
|
||||
@@ -819,8 +817,8 @@ func classToInt(token string) (uint16, bool) {
|
||||
if len(token) < offset+1 {
|
||||
return 0, false
|
||||
}
|
||||
class, ok := strconv.Atoi(token[offset:])
|
||||
if ok != nil || class > maxUint16 {
|
||||
class, err := strconv.ParseUint(token[offset:], 10, 16)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return uint16(class), true
|
||||
@@ -832,15 +830,15 @@ func typeToInt(token string) (uint16, bool) {
|
||||
if len(token) < offset+1 {
|
||||
return 0, false
|
||||
}
|
||||
typ, ok := strconv.Atoi(token[offset:])
|
||||
if ok != nil || typ > maxUint16 {
|
||||
typ, err := strconv.ParseUint(token[offset:], 10, 16)
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
return uint16(typ), true
|
||||
}
|
||||
|
||||
// Parse things like 2w, 2m, etc, Return the time in seconds.
|
||||
func stringToTtl(token string) (uint32, bool) {
|
||||
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
|
||||
func stringToTTL(token string) (uint32, bool) {
|
||||
s := uint32(0)
|
||||
i := uint32(0)
|
||||
for _, c := range token {
|
||||
@@ -913,6 +911,34 @@ func stringToCm(token string) (e, m uint8, ok bool) {
|
||||
return
|
||||
}
|
||||
|
||||
func toAbsoluteName(name, origin string) (absolute string, ok bool) {
|
||||
// check for an explicit origin reference
|
||||
if name == "@" {
|
||||
// require a nonempty origin
|
||||
if origin == "" {
|
||||
return "", false
|
||||
}
|
||||
return origin, true
|
||||
}
|
||||
|
||||
// require a valid domain name
|
||||
_, ok = IsDomainName(name)
|
||||
if !ok || name == "" {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// check if name is already absolute
|
||||
if name[len(name)-1] == '.' {
|
||||
return name, true
|
||||
}
|
||||
|
||||
// require a nonempty origin
|
||||
if origin == "" {
|
||||
return "", false
|
||||
}
|
||||
return appendOrigin(name, origin), true
|
||||
}
|
||||
|
||||
func appendOrigin(name, origin string) string {
|
||||
if origin == "." {
|
||||
return name + origin
|
||||
|
||||
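The scan.go changes above rework default-TTL tracking ($TTL versus the last explicit TTL), origin handling, and $INCLUDE path resolution for the zone parser. A minimal sketch of the public entry points involved, using a made-up zone:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	// Single record: class defaults to IN and TTL to 3600 when omitted.
	rr, err := dns.NewRR("www.example.org. 3600 IN A 192.0.2.1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rr)

	// Whole zone: each Token carries either an RR (plus optional comment) or an error.
	zone := `$ORIGIN example.org.
$TTL 1h
@    IN SOA ns1 hostmaster 2018031501 7200 3600 1209600 3600
     IN NS  ns1
ns1  IN A   192.0.2.53
`
	for tok := range dns.ParseZone(strings.NewReader(zone), "example.org.", "example.org.zone") {
		if tok.Error != nil {
			log.Fatal(tok.Error)
		}
		fmt.Println(tok.RR)
	}
}
```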
vendor/github.com/miekg/dns/scan_rr.go (generated, vendored): 824 changed lines (diff suppressed because it is too large)
vendor/github.com/miekg/dns/scanner.go (generated, vendored): 17 changed lines
@@ -4,6 +4,7 @@ package dns
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"text/scanner"
|
||||
)
|
||||
@@ -12,13 +13,18 @@ type scan struct {
|
||||
src *bufio.Reader
|
||||
position scanner.Position
|
||||
eof bool // Have we just seen a eof
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func scanInit(r io.Reader) *scan {
|
||||
func scanInit(r io.Reader) (*scan, context.CancelFunc) {
|
||||
s := new(scan)
|
||||
s.src = bufio.NewReader(r)
|
||||
s.position.Line = 1
|
||||
return s
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
s.ctx = ctx
|
||||
|
||||
return s, cancel
|
||||
}
|
||||
|
||||
// tokenText returns the next byte from the input
|
||||
@@ -27,6 +33,13 @@ func (s *scan) tokenText() (byte, error) {
|
||||
if err != nil {
|
||||
return c, err
|
||||
}
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return c, context.Canceled
|
||||
default:
|
||||
break
|
||||
}
|
||||
|
||||
// delay the newline handling until the next token is delivered,
|
||||
// fixes off-by-one errors when reporting a parse error.
|
||||
if s.eof == true {
|
||||
|
||||
vendor/github.com/miekg/dns/server.go (generated, vendored): 65 changed lines
@@ -285,7 +285,7 @@ type Server struct {
|
||||
WriteTimeout time.Duration
|
||||
// TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
|
||||
IdleTimeout func() time.Duration
|
||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>.
|
||||
// Secret(s) for Tsig map[<zonename>]<base64 secret>. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
|
||||
TsigSecret map[string]string
|
||||
// Unsafe instructs the server to disregard any sanity checks and directly hand the message to
|
||||
// the handler. It will specifically not check if the query has the QR bit not set.
|
||||
@@ -297,10 +297,7 @@ type Server struct {
|
||||
// DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
|
||||
DecorateWriter DecorateWriter
|
||||
|
||||
// Graceful shutdown handling
|
||||
|
||||
inFlight sync.WaitGroup
|
||||
|
||||
// Shutdown handling
|
||||
lock sync.RWMutex
|
||||
started bool
|
||||
}
|
||||
@@ -412,10 +409,8 @@ func (srv *Server) ActivateAndServe() error {
|
||||
return &Error{err: "bad listeners"}
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down a server. After a call to Shutdown, ListenAndServe and
|
||||
// ActivateAndServe will return. All in progress queries are completed before the server
|
||||
// is taken down. If the Shutdown is taking longer than the reading timeout an error
|
||||
// is returned.
|
||||
// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and
|
||||
// ActivateAndServe will return.
|
||||
func (srv *Server) Shutdown() error {
|
||||
srv.lock.Lock()
|
||||
if !srv.started {
|
||||
@@ -431,19 +426,7 @@ func (srv *Server) Shutdown() error {
|
||||
if srv.Listener != nil {
|
||||
srv.Listener.Close()
|
||||
}
|
||||
|
||||
fin := make(chan bool)
|
||||
go func() {
|
||||
srv.inFlight.Wait()
|
||||
fin <- true
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(srv.getReadTimeout()):
|
||||
return &Error{err: "server shutdown is pending"}
|
||||
case <-fin:
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getReadTimeout is a helper func to use system timeout if server did not intend to change it.
|
||||
@@ -477,13 +460,6 @@ func (srv *Server) serveTCP(l net.Listener) error {
|
||||
// deadline is not used here
|
||||
for {
|
||||
rw, err := l.Accept()
|
||||
if err != nil {
|
||||
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
m, err := reader.ReadTCP(rw, rtimeout)
|
||||
srv.lock.RLock()
|
||||
if !srv.started {
|
||||
srv.lock.RUnlock()
|
||||
@@ -491,10 +467,19 @@ func (srv *Server) serveTCP(l net.Listener) error {
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
if err != nil {
|
||||
continue
|
||||
if neterr, ok := err.(net.Error); ok && neterr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
srv.inFlight.Add(1)
|
||||
go srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
||||
go func() {
|
||||
m, err := reader.ReadTCP(rw, rtimeout)
|
||||
if err != nil {
|
||||
rw.Close()
|
||||
return
|
||||
}
|
||||
srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -527,17 +512,20 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
|
||||
}
|
||||
srv.lock.RUnlock()
|
||||
if err != nil {
|
||||
if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if len(m) < headerSize {
|
||||
continue
|
||||
}
|
||||
srv.inFlight.Add(1)
|
||||
go srv.serve(s.RemoteAddr(), handler, m, l, s, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Serve a new connection.
|
||||
func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) {
|
||||
defer srv.inFlight.Done()
|
||||
|
||||
w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s}
|
||||
if srv.DecorateWriter != nil {
|
||||
w.writer = srv.DecorateWriter(w)
|
||||
@@ -647,11 +635,8 @@ func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *S
|
||||
conn.SetReadDeadline(time.Now().Add(timeout))
|
||||
m := make([]byte, srv.UDPSize)
|
||||
n, s, err := ReadFromSessionUDP(conn, m)
|
||||
if err != nil || n == 0 {
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, ErrShortRead
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
m = m[:n]
|
||||
return m, s, nil
|
||||
|
||||
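The server.go hunks above replace the in-flight WaitGroup with per-connection goroutines and make Shutdown return without waiting for pending queries. A hedged sketch of the server lifecycle this API exposes; the address and handler are arbitrary choices for the example:

```go
package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	dns.HandleFunc(".", func(w dns.ResponseWriter, r *dns.Msg) {
		m := new(dns.Msg)
		m.SetReply(r) // empty NOERROR answer, just for the sketch
		w.WriteMsg(m)
	})

	srv := &dns.Server{Addr: "127.0.0.1:8053", Net: "udp"}
	go func() {
		if err := srv.ListenAndServe(); err != nil {
			log.Printf("dns server stopped: %v", err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // crude; NotifyStartedFunc is the cleaner hook
	if err := srv.Shutdown(); err != nil {
		log.Printf("shutdown: %v", err)
	}
}
```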
vendor/github.com/miekg/dns/smimea.go (generated, vendored): 8 changed lines
@@ -33,15 +33,15 @@ func (r *SMIMEA) Verify(cert *x509.Certificate) error {
	return ErrSig // ErrSig, really?
}

// SIMEAName returns the ownername of a SMIMEA resource record as per the
// SMIMEAName returns the ownername of a SMIMEA resource record as per the
// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3
func SMIMEAName(email_address string, domain_name string) (string, error) {
func SMIMEAName(email, domain string) (string, error) {
	hasher := sha256.New()
	hasher.Write([]byte(email_address))
	hasher.Write([]byte(email))

	// RFC Section 3: "The local-part is hashed using the SHA2-256
	// algorithm with the hash truncated to 28 octets and
	// represented in its hexadecimal representation to become the
	// left-most label in the prepared domain name"
	return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain_name, nil
	return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil
}
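With the parameter rename above, SMIMEAName maps an e-mail address and a domain to the owner name under which the SMIMEA record is published. A short usage sketch; the address and domain are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	name, err := dns.SMIMEAName("hugh@example.com", "example.com.")
	if err != nil {
		log.Fatal(err)
	}
	// Prints <28-octet truncated SHA2-256 of the local part, hex>._smimecert.example.com.
	fmt.Println(name)
}
```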
vendor/github.com/miekg/dns/tsig.go (generated, vendored): 3 changed lines
@@ -208,6 +208,9 @@ func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []b
		rr.Fudge = 300 // Standard (RFC) default.
	}

	// Replace message ID in header with original ID from TSIG
	binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId)

	if requestMAC != "" {
		m := new(macWireFmt)
		m.MACSize = uint16(len(requestMAC) / 2)
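The TsigSecret comment in server.go earlier in this compare and the tsig.go hunk above both concern TSIG signing; the key name must be a lowercase FQDN on both the client and the server side. A hedged client-side sketch with a placeholder key and server address:

```go
package main

import (
	"log"
	"time"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	// Key name in canonical form (lowercase, fqdn); the base64 secret is made up here.
	c.TsigSecret = map[string]string{"axfr.example.org.": "c2VjcmV0IGtleSBkYXRh"}

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.SetTsig("axfr.example.org.", dns.HmacSHA256, 300, time.Now().Unix())

	in, _, err := c.Exchange(m, "192.0.2.53:53")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(in.Rcode)
}
```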
vendor/github.com/miekg/dns/types.go (generated, vendored): 184 changed lines
@@ -78,6 +78,7 @@ const (
|
||||
TypeCDS uint16 = 59
|
||||
TypeCDNSKEY uint16 = 60
|
||||
TypeOPENPGPKEY uint16 = 61
|
||||
TypeCSYNC uint16 = 62
|
||||
TypeSPF uint16 = 99
|
||||
TypeUINFO uint16 = 100
|
||||
TypeUID uint16 = 101
|
||||
@@ -91,6 +92,7 @@ const (
|
||||
TypeEUI64 uint16 = 109
|
||||
TypeURI uint16 = 256
|
||||
TypeCAA uint16 = 257
|
||||
TypeAVC uint16 = 258
|
||||
|
||||
TypeTKEY uint16 = 249
|
||||
TypeTSIG uint16 = 250
|
||||
@@ -114,27 +116,27 @@ const (
|
||||
ClassNONE = 254
|
||||
ClassANY = 255
|
||||
|
||||
// Message Response Codes.
|
||||
RcodeSuccess = 0
|
||||
RcodeFormatError = 1
|
||||
RcodeServerFailure = 2
|
||||
RcodeNameError = 3
|
||||
RcodeNotImplemented = 4
|
||||
RcodeRefused = 5
|
||||
RcodeYXDomain = 6
|
||||
RcodeYXRrset = 7
|
||||
RcodeNXRrset = 8
|
||||
RcodeNotAuth = 9
|
||||
RcodeNotZone = 10
|
||||
RcodeBadSig = 16 // TSIG
|
||||
RcodeBadVers = 16 // EDNS0
|
||||
RcodeBadKey = 17
|
||||
RcodeBadTime = 18
|
||||
RcodeBadMode = 19 // TKEY
|
||||
RcodeBadName = 20
|
||||
RcodeBadAlg = 21
|
||||
RcodeBadTrunc = 22 // TSIG
|
||||
RcodeBadCookie = 23 // DNS Cookies
|
||||
// Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml
|
||||
RcodeSuccess = 0 // NoError - No Error [DNS]
|
||||
RcodeFormatError = 1 // FormErr - Format Error [DNS]
|
||||
RcodeServerFailure = 2 // ServFail - Server Failure [DNS]
|
||||
RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS]
|
||||
RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS]
|
||||
RcodeRefused = 5 // Refused - Query Refused [DNS]
|
||||
RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update]
|
||||
RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update]
|
||||
RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update]
|
||||
RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update]
|
||||
RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG]
|
||||
RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG]
|
||||
RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0]
|
||||
RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG]
|
||||
RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG]
|
||||
RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY]
|
||||
RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY]
|
||||
RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY]
|
||||
RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG]
|
||||
RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies]
|
||||
|
||||
// Message Opcodes. There is no 3.
|
||||
OpcodeQuery = 0
|
||||
@@ -144,7 +146,7 @@ const (
|
||||
OpcodeUpdate = 5
|
||||
)
|
||||
|
||||
// Headers is the wire format for the DNS packet header.
|
||||
// Header is the wire format for the DNS packet header.
|
||||
type Header struct {
|
||||
Id uint16
|
||||
Bits uint16
|
||||
@@ -163,14 +165,15 @@ const (
|
||||
_Z = 1 << 6 // Z
|
||||
_AD = 1 << 5 // authticated data
|
||||
_CD = 1 << 4 // checking disabled
|
||||
)
|
||||
|
||||
// Various constants used in the LOC RR, See RFC 1887.
|
||||
const (
|
||||
LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2.
|
||||
LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2.
|
||||
|
||||
LOC_HOURS = 60 * 1000
|
||||
LOC_DEGREES = 60 * LOC_HOURS
|
||||
|
||||
LOC_ALTITUDEBASE = 100000
|
||||
LOC_HOURS = 60 * 1000
|
||||
LOC_DEGREES = 60 * LOC_HOURS
|
||||
LOC_ALTITUDEBASE = 100000
|
||||
)
|
||||
|
||||
// Different Certificate Types, see RFC 4398, Section 2.1
|
||||
@@ -236,6 +239,7 @@ type ANY struct {
|
||||
|
||||
func (rr *ANY) String() string { return rr.Hdr.String() }
|
||||
|
||||
// CNAME RR. See RFC 1034.
|
||||
type CNAME struct {
|
||||
Hdr RR_Header
|
||||
Target string `dns:"cdomain-name"`
|
||||
@@ -243,6 +247,7 @@ type CNAME struct {
|
||||
|
||||
func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) }
|
||||
|
||||
// HINFO RR. See RFC 1034.
|
||||
type HINFO struct {
|
||||
Hdr RR_Header
|
||||
Cpu string
|
||||
@@ -253,6 +258,7 @@ func (rr *HINFO) String() string {
|
||||
return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os})
|
||||
}
|
||||
|
||||
// MB RR. See RFC 1035.
|
||||
type MB struct {
|
||||
Hdr RR_Header
|
||||
Mb string `dns:"cdomain-name"`
|
||||
@@ -260,6 +266,7 @@ type MB struct {
|
||||
|
||||
func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) }
|
||||
|
||||
// MG RR. See RFC 1035.
|
||||
type MG struct {
|
||||
Hdr RR_Header
|
||||
Mg string `dns:"cdomain-name"`
|
||||
@@ -267,6 +274,7 @@ type MG struct {
|
||||
|
||||
func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) }
|
||||
|
||||
// MINFO RR. See RFC 1035.
|
||||
type MINFO struct {
|
||||
Hdr RR_Header
|
||||
Rmail string `dns:"cdomain-name"`
|
||||
@@ -277,6 +285,7 @@ func (rr *MINFO) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email)
|
||||
}
|
||||
|
||||
// MR RR. See RFC 1035.
|
||||
type MR struct {
|
||||
Hdr RR_Header
|
||||
Mr string `dns:"cdomain-name"`
|
||||
@@ -286,6 +295,7 @@ func (rr *MR) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Mr)
|
||||
}
|
||||
|
||||
// MF RR. See RFC 1035.
|
||||
type MF struct {
|
||||
Hdr RR_Header
|
||||
Mf string `dns:"cdomain-name"`
|
||||
@@ -295,6 +305,7 @@ func (rr *MF) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Mf)
|
||||
}
|
||||
|
||||
// MD RR. See RFC 1035.
|
||||
type MD struct {
|
||||
Hdr RR_Header
|
||||
Md string `dns:"cdomain-name"`
|
||||
@@ -304,6 +315,7 @@ func (rr *MD) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Md)
|
||||
}
|
||||
|
||||
// MX RR. See RFC 1035.
|
||||
type MX struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -314,6 +326,7 @@ func (rr *MX) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx)
|
||||
}
|
||||
|
||||
// AFSDB RR. See RFC 1183.
|
||||
type AFSDB struct {
|
||||
Hdr RR_Header
|
||||
Subtype uint16
|
||||
@@ -324,6 +337,7 @@ func (rr *AFSDB) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname)
|
||||
}
|
||||
|
||||
// X25 RR. See RFC 1183, Section 3.1.
|
||||
type X25 struct {
|
||||
Hdr RR_Header
|
||||
PSDNAddress string
|
||||
@@ -333,6 +347,7 @@ func (rr *X25) String() string {
|
||||
return rr.Hdr.String() + rr.PSDNAddress
|
||||
}
|
||||
|
||||
// RT RR. See RFC 1183, Section 3.3.
|
||||
type RT struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -343,6 +358,7 @@ func (rr *RT) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host)
|
||||
}
|
||||
|
||||
// NS RR. See RFC 1035.
|
||||
type NS struct {
|
||||
Hdr RR_Header
|
||||
Ns string `dns:"cdomain-name"`
|
||||
@@ -352,6 +368,7 @@ func (rr *NS) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Ns)
|
||||
}
|
||||
|
||||
// PTR RR. See RFC 1035.
|
||||
type PTR struct {
|
||||
Hdr RR_Header
|
||||
Ptr string `dns:"cdomain-name"`
|
||||
@@ -361,6 +378,7 @@ func (rr *PTR) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Ptr)
|
||||
}
|
||||
|
||||
// RP RR. See RFC 1138, Section 2.2.
|
||||
type RP struct {
|
||||
Hdr RR_Header
|
||||
Mbox string `dns:"domain-name"`
|
||||
@@ -371,6 +389,7 @@ func (rr *RP) String() string {
|
||||
return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt})
|
||||
}
|
||||
|
||||
// SOA RR. See RFC 1035.
|
||||
type SOA struct {
|
||||
Hdr RR_Header
|
||||
Ns string `dns:"cdomain-name"`
|
||||
@@ -391,6 +410,7 @@ func (rr *SOA) String() string {
|
||||
" " + strconv.FormatInt(int64(rr.Minttl), 10)
|
||||
}
|
||||
|
||||
// TXT RR. See RFC 1035.
|
||||
type TXT struct {
|
||||
Hdr RR_Header
|
||||
Txt []string `dns:"txt"`
|
||||
@@ -523,6 +543,7 @@ func nextByte(b []byte, offset int) (byte, int) {
|
||||
return b[offset+1], 2
|
||||
}
|
||||
|
||||
// SPF RR. See RFC 4408, Section 3.1.1.
|
||||
type SPF struct {
|
||||
Hdr RR_Header
|
||||
Txt []string `dns:"txt"`
|
||||
@@ -530,6 +551,15 @@ type SPF struct {
|
||||
|
||||
func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
|
||||
|
||||
// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template.
|
||||
type AVC struct {
|
||||
Hdr RR_Header
|
||||
Txt []string `dns:"txt"`
|
||||
}
|
||||
|
||||
func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
|
||||
|
||||
// SRV RR. See RFC 2782.
|
||||
type SRV struct {
|
||||
Hdr RR_Header
|
||||
Priority uint16
|
||||
@@ -545,6 +575,7 @@ func (rr *SRV) String() string {
|
||||
strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target)
|
||||
}
|
||||
|
||||
// NAPTR RR. See RFC 2915.
|
||||
type NAPTR struct {
|
||||
Hdr RR_Header
|
||||
Order uint16
|
||||
@@ -565,7 +596,7 @@ func (rr *NAPTR) String() string {
|
||||
rr.Replacement
|
||||
}
|
||||
|
||||
// The CERT resource record, see RFC 4398.
|
||||
// CERT RR. See RFC 4398.
|
||||
type CERT struct {
|
||||
Hdr RR_Header
|
||||
Type uint16
|
||||
@@ -591,7 +622,7 @@ func (rr *CERT) String() string {
|
||||
" " + rr.Certificate
|
||||
}
|
||||
|
||||
// The DNAME resource record, see RFC 2672.
|
||||
// DNAME RR. See RFC 2672.
|
||||
type DNAME struct {
|
||||
Hdr RR_Header
|
||||
Target string `dns:"domain-name"`
|
||||
@@ -601,6 +632,7 @@ func (rr *DNAME) String() string {
|
||||
return rr.Hdr.String() + sprintName(rr.Target)
|
||||
}
|
||||
|
||||
// A RR. See RFC 1035.
|
||||
type A struct {
|
||||
Hdr RR_Header
|
||||
A net.IP `dns:"a"`
|
||||
@@ -613,6 +645,7 @@ func (rr *A) String() string {
|
||||
return rr.Hdr.String() + rr.A.String()
|
||||
}
|
||||
|
||||
// AAAA RR. See RFC 3596.
|
||||
type AAAA struct {
|
||||
Hdr RR_Header
|
||||
AAAA net.IP `dns:"aaaa"`
|
||||
@@ -625,6 +658,7 @@ func (rr *AAAA) String() string {
|
||||
return rr.Hdr.String() + rr.AAAA.String()
|
||||
}
|
||||
|
||||
// PX RR. See RFC 2163.
|
||||
type PX struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -636,6 +670,7 @@ func (rr *PX) String() string {
|
||||
return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400)
|
||||
}
|
||||
|
||||
// GPOS RR. See RFC 1712.
|
||||
type GPOS struct {
|
||||
Hdr RR_Header
|
||||
Longitude string
|
||||
@@ -647,6 +682,7 @@ func (rr *GPOS) String() string {
|
||||
return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude
|
||||
}
|
||||
|
||||
// LOC RR. See RFC RFC 1876.
|
||||
type LOC struct {
|
||||
Hdr RR_Header
|
||||
Version uint8
|
||||
@@ -723,11 +759,12 @@ func (rr *LOC) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// SIG is identical to RRSIG and nowadays only used for SIG(0), RFC2931.
|
||||
// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931.
|
||||
type SIG struct {
|
||||
RRSIG
|
||||
}
|
||||
|
||||
// RRSIG RR. See RFC 4034 and RFC 3755.
|
||||
type RRSIG struct {
|
||||
Hdr RR_Header
|
||||
TypeCovered uint16
|
||||
@@ -755,6 +792,7 @@ func (rr *RRSIG) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// NSEC RR. See RFC 4034 and RFC 3755.
|
||||
type NSEC struct {
|
||||
Hdr RR_Header
|
||||
NextDomain string `dns:"domain-name"`
|
||||
@@ -782,14 +820,13 @@ func (rr *NSEC) len() int {
|
||||
return l
|
||||
}
|
||||
|
||||
type DLV struct {
|
||||
DS
|
||||
}
|
||||
// DLV RR. See RFC 4431.
|
||||
type DLV struct{ DS }
|
||||
|
||||
type CDS struct {
|
||||
DS
|
||||
}
|
||||
// CDS RR. See RFC 7344.
|
||||
type CDS struct{ DS }
|
||||
|
||||
// DS RR. See RFC 4034 and RFC 3658.
|
||||
type DS struct {
|
||||
Hdr RR_Header
|
||||
KeyTag uint16
|
||||
@@ -805,6 +842,7 @@ func (rr *DS) String() string {
|
||||
" " + strings.ToUpper(rr.Digest)
|
||||
}
|
||||
|
||||
// KX RR. See RFC 2230.
|
||||
type KX struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -816,6 +854,7 @@ func (rr *KX) String() string {
|
||||
" " + sprintName(rr.Exchanger)
|
||||
}
|
||||
|
||||
// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf.
|
||||
type TA struct {
|
||||
Hdr RR_Header
|
||||
KeyTag uint16
|
||||
@@ -831,6 +870,7 @@ func (rr *TA) String() string {
|
||||
" " + strings.ToUpper(rr.Digest)
|
||||
}
|
||||
|
||||
// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template.
|
||||
type TALINK struct {
|
||||
Hdr RR_Header
|
||||
PreviousName string `dns:"domain-name"`
|
||||
@@ -842,6 +882,7 @@ func (rr *TALINK) String() string {
|
||||
sprintName(rr.PreviousName) + " " + sprintName(rr.NextName)
|
||||
}
|
||||
|
||||
// SSHFP RR. See RFC RFC 4255.
|
||||
type SSHFP struct {
|
||||
Hdr RR_Header
|
||||
Algorithm uint8
|
||||
@@ -855,14 +896,17 @@ func (rr *SSHFP) String() string {
|
||||
" " + strings.ToUpper(rr.FingerPrint)
|
||||
}
|
||||
|
||||
// KEY RR. See RFC RFC 2535.
|
||||
type KEY struct {
|
||||
DNSKEY
|
||||
}
|
||||
|
||||
// CDNSKEY RR. See RFC 7344.
|
||||
type CDNSKEY struct {
|
||||
DNSKEY
|
||||
}
|
||||
|
||||
// DNSKEY RR. See RFC 4034 and RFC 3755.
|
||||
type DNSKEY struct {
|
||||
Hdr RR_Header
|
||||
Flags uint16
|
||||
@@ -878,6 +922,7 @@ func (rr *DNSKEY) String() string {
|
||||
" " + rr.PublicKey
|
||||
}
|
||||
|
||||
// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template.
|
||||
type RKEY struct {
|
||||
Hdr RR_Header
|
||||
Flags uint16
|
||||
@@ -893,6 +938,7 @@ func (rr *RKEY) String() string {
|
||||
" " + rr.PublicKey
|
||||
}
|
||||
|
||||
// NSAPPTR RR. See RFC 1348.
|
||||
type NSAPPTR struct {
|
||||
Hdr RR_Header
|
||||
Ptr string `dns:"domain-name"`
|
||||
@@ -900,6 +946,7 @@ type NSAPPTR struct {
|
||||
|
||||
func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) }
|
||||
|
||||
// NSEC3 RR. See RFC 5155.
|
||||
type NSEC3 struct {
|
||||
Hdr RR_Header
|
||||
Hash uint8
|
||||
@@ -938,6 +985,7 @@ func (rr *NSEC3) len() int {
|
||||
return l
|
||||
}
|
||||
|
||||
// NSEC3PARAM RR. See RFC 5155.
|
||||
type NSEC3PARAM struct {
|
||||
Hdr RR_Header
|
||||
Hash uint8
|
||||
@@ -956,6 +1004,7 @@ func (rr *NSEC3PARAM) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// TKEY RR. See RFC 2930.
|
||||
type TKEY struct {
|
||||
Hdr RR_Header
|
||||
Algorithm string `dns:"domain-name"`
|
||||
@@ -964,17 +1013,21 @@ type TKEY struct {
|
||||
Mode uint16
|
||||
Error uint16
|
||||
KeySize uint16
|
||||
Key string
|
||||
Key string `dns:"size-hex:KeySize"`
|
||||
OtherLen uint16
|
||||
OtherData string
|
||||
OtherData string `dns:"size-hex:OtherLen"`
|
||||
}
|
||||
|
||||
// TKEY has no official presentation format, but this will suffice.
|
||||
func (rr *TKEY) String() string {
|
||||
// It has no presentation format
|
||||
return ""
|
||||
s := "\n;; TKEY PSEUDOSECTION:\n"
|
||||
s += rr.Hdr.String() + " " + rr.Algorithm + " " +
|
||||
strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " +
|
||||
strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData
|
||||
return s
|
||||
}
|
||||
|
||||
// RFC3597 represents an unknown/generic RR.
|
||||
// RFC3597 represents an unknown/generic RR. See RFC 3597.
|
||||
type RFC3597 struct {
|
||||
Hdr RR_Header
|
||||
Rdata string `dns:"hex"`
|
||||
@@ -998,6 +1051,7 @@ func rfc3597Header(h RR_Header) string {
|
||||
return s
|
||||
}
|
||||
|
||||
// URI RR. See RFC 7553.
|
||||
type URI struct {
|
||||
Hdr RR_Header
|
||||
Priority uint16
|
||||
@@ -1010,6 +1064,7 @@ func (rr *URI) String() string {
|
||||
" " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target)
|
||||
}
|
||||
|
||||
// DHCID RR. See RFC 4701.
|
||||
type DHCID struct {
|
||||
Hdr RR_Header
|
||||
Digest string `dns:"base64"`
|
||||
@@ -1017,6 +1072,7 @@ type DHCID struct {
|
||||
|
||||
func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest }
|
||||
|
||||
// TLSA RR. See RFC 6698.
|
||||
type TLSA struct {
|
||||
Hdr RR_Header
|
||||
Usage uint8
|
||||
@@ -1033,6 +1089,7 @@ func (rr *TLSA) String() string {
|
||||
" " + rr.Certificate
|
||||
}
|
||||
|
||||
// SMIMEA RR. See RFC 8162.
|
||||
type SMIMEA struct {
|
||||
Hdr RR_Header
|
||||
Usage uint8
|
||||
@@ -1055,6 +1112,7 @@ func (rr *SMIMEA) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// HIP RR. See RFC 8005.
|
||||
type HIP struct {
|
||||
Hdr RR_Header
|
||||
HitLength uint8
|
||||
@@ -1076,6 +1134,7 @@ func (rr *HIP) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template.
|
||||
type NINFO struct {
|
||||
Hdr RR_Header
|
||||
ZSData []string `dns:"txt"`
|
||||
@@ -1083,6 +1142,7 @@ type NINFO struct {
|
||||
|
||||
func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) }
|
||||
|
||||
// NID RR. See RFC RFC 6742.
|
||||
type NID struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -1096,6 +1156,7 @@ func (rr *NID) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// L32 RR, See RFC 6742.
|
||||
type L32 struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -1110,6 +1171,7 @@ func (rr *L32) String() string {
|
||||
" " + rr.Locator32.String()
|
||||
}
|
||||
|
||||
// L64 RR, See RFC 6742.
|
||||
type L64 struct {
|
||||
Hdr RR_Header
|
||||
Preference uint16
|
||||
@@ -1123,6 +1185,7 @@ func (rr *L64) String() string {
|
||||
return s
|
||||
}
|
||||
|
||||
// LP RR. See RFC 6742.
|
||||
type LP struct {
|
||||
Hdr RR_Header
Preference uint16
@@ -1133,6 +1196,7 @@ func (rr *LP) String() string {
return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn)
}

// EUI48 RR. See RFC 7043.
type EUI48 struct {
Hdr RR_Header
Address uint64 `dns:"uint48"`
@@ -1140,6 +1204,7 @@ type EUI48 struct {

func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) }

// EUI64 RR. See RFC 7043.
type EUI64 struct {
Hdr RR_Header
Address uint64
@@ -1147,6 +1212,7 @@ type EUI64 struct {

func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) }

// CAA RR. See RFC 6844.
type CAA struct {
Hdr RR_Header
Flag uint8
@@ -1158,6 +1224,7 @@ func (rr *CAA) String() string {
return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value)
}

// UID RR. Deprecated, IANA-Reserved.
type UID struct {
Hdr RR_Header
Uid uint32
@@ -1165,6 +1232,7 @@ type UID struct {

func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) }

// GID RR. Deprecated, IANA-Reserved.
type GID struct {
Hdr RR_Header
Gid uint32
@@ -1172,6 +1240,7 @@ type GID struct {

func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) }

// UINFO RR. Deprecated, IANA-Reserved.
type UINFO struct {
Hdr RR_Header
Uinfo string
@@ -1179,6 +1248,7 @@ type UINFO struct {

func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) }

// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
type EID struct {
Hdr RR_Header
Endpoint string `dns:"hex"`
@@ -1186,6 +1256,7 @@ type EID struct {

func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) }

// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt.
type NIMLOC struct {
Hdr RR_Header
Locator string `dns:"hex"`
@@ -1193,6 +1264,7 @@ type NIMLOC struct {

func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) }

// OPENPGPKEY RR. See RFC 7929.
type OPENPGPKEY struct {
Hdr RR_Header
PublicKey string `dns:"base64"`
@@ -1200,6 +1272,36 @@ type OPENPGPKEY struct {

func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey }

// CSYNC RR. See RFC 7477.
type CSYNC struct {
Hdr RR_Header
Serial uint32
Flags uint16
TypeBitMap []uint16 `dns:"nsec"`
}

func (rr *CSYNC) String() string {
s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags))

for i := 0; i < len(rr.TypeBitMap); i++ {
s += " " + Type(rr.TypeBitMap[i]).String()
}
return s
}

func (rr *CSYNC) len() int {
l := rr.Hdr.len() + 4 + 2
lastwindow := uint32(2 ^ 32 + 1)
for _, t := range rr.TypeBitMap {
window := t / 256
if uint32(window) != lastwindow {
l += 1 + 32
}
lastwindow = uint32(window)
}
return l
}

// TimeToString translates the RRSIG's incep. and expir. times to the
// string representation used when printing the record.
// It takes serial arithmetic (RFC 1982) into account.

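As an aside, a minimal sketch of how the CSYNC type introduced above renders as zone-file text when used from outside the package. The record name, TTL and field values are illustrative assumptions, and imports of fmt and the vendored dns package are assumed:

rr := &dns.CSYNC{
	Hdr:        dns.RR_Header{Name: "example.org.", Rrtype: dns.TypeCSYNC, Class: dns.ClassINET, Ttl: 3600},
	Serial:     66,
	Flags:      3,
	TypeBitMap: []uint16{dns.TypeA, dns.TypeNS, dns.TypeAAAA},
}
// String() should yield something like: example.org.	3600	IN	CSYNC	66 3 A NS AAAA
fmt.Println(rr.String())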
9 vendor/github.com/miekg/dns/types_generate.go (generated, vendored)
@@ -23,11 +23,11 @@ var skipLen = map[string]struct{}{
"NSEC": {},
"NSEC3": {},
"OPT": {},
"CSYNC": {},
}

var packageHdr = `
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from type_generate.go
// Code generated by "go run types_generate.go"; DO NOT EDIT.

package dns

@@ -56,7 +56,6 @@ var TypeToString = map[uint16]string{
`))

var headerFunc = template.Must(template.New("headerFunc").Parse(`
// Header() functions
{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr }
{{end}}

@@ -182,6 +181,8 @@ func main() {
fallthrough
case st.Tag(i) == `dns:"base64"`:
o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored
o("l += len(rr.%s)/2\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-hex`):
fallthrough
case st.Tag(i) == `dns:"hex"`:
@@ -197,7 +198,7 @@ func main() {
case st.Tag(i) == "":
switch st.Field(i).Type().(*types.Basic).Kind() {
case types.Uint8:
o("l += 1 // %s\n")
o("l++ // %s\n")
case types.Uint16:
o("l += 2 // %s\n")
case types.Uint32:

85 vendor/github.com/miekg/dns/udp.go (generated, vendored)
@@ -1,10 +1,12 @@
// +build !windows,!plan9
// +build !windows

package dns

import (
"net"
"syscall"

"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)

// SessionUDP holds the remote address and the associated
@@ -17,29 +19,6 @@ type SessionUDP struct {
// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per platform basis. See udp_*.go for more details
func setUDPSocketOptions(conn *net.UDPConn) error {
sa, err := getUDPSocketName(conn)
if err != nil {
return err
}
switch sa.(type) {
case *syscall.SockaddrInet6:
v6only, err := getUDPSocketOptions6Only(conn)
if err != nil {
return err
}
setUDPSocketOptions6(conn)
if !v6only {
setUDPSocketOptions4(conn)
}
case *syscall.SockaddrInet4:
setUDPSocketOptions4(conn)
}
return nil
}

// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
@@ -51,8 +30,60 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
return n, &SessionUDP{raddr, oob[:oobn]}, err
}

// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
oob := correctSource(session.context)
n, _, err := conn.WriteMsgUDP(b, oob, session.raddr)
return n, err
}

func setUDPSocketOptions(conn *net.UDPConn) error {
// Try setting the flags for both families and ignore the errors unless they
// both error.
err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true)
err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true)
if err6 != nil && err4 != nil {
return err4
}
return nil
}

// parseDstFromOOB takes oob data and returns the destination IP.
func parseDstFromOOB(oob []byte) net.IP {
// Start with IPv6 and then fallback to IPv4
// TODO(fastest963): Figure out a way to prefer one or the other. Looking at
// the lvl of the header for a 0 or 41 isn't cross-platform.
var dst net.IP
cm6 := new(ipv6.ControlMessage)
if cm6.Parse(oob) == nil {
dst = cm6.Dst
}
if dst == nil {
cm4 := new(ipv4.ControlMessage)
if cm4.Parse(oob) == nil {
dst = cm4.Dst
}
}
return dst
}

// correctSource takes oob data and returns new oob data with the Src equal to the Dst
func correctSource(oob []byte) []byte {
dst := parseDstFromOOB(oob)
if dst == nil {
return nil
}
// If the dst is definitely an IPv6, then use ipv6's ControlMessage to
// respond otherwise use ipv4's because ipv6's marshal ignores ipv4
// addresses.
if dst.To4() == nil {
cm := new(ipv6.ControlMessage)
cm.Src = dst
oob = cm.Marshal()
} else {
cm := new(ipv4.ControlMessage)
cm.Src = dst
oob = cm.Marshal()
}
return oob
}

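For orientation, callers go through the exported ReadFromSessionUDP/WriteToSessionUDP pair shown above. A minimal sketch, assuming a locally bound listener (the port is arbitrary) and eliding error handling; note that the real server path calls setUDPSocketOptions first so the control messages are actually delivered:

conn, err := net.ListenUDP("udp", &net.UDPAddr{Port: 5300})
if err != nil { /* handle */ }
buf := make([]byte, 65535)
n, session, err := dns.ReadFromSessionUDP(conn, buf)
if err == nil {
	// Echo the payload back; the session's oob data is what correctSource
	// uses to pin the reply's source address to the address the query arrived on.
	dns.WriteToSessionUDP(conn, buf[:n], session)
}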
82 vendor/github.com/miekg/dns/udp_linux.go (generated, vendored)
@@ -1,82 +0,0 @@
// +build linux

package dns

// See:
// * http://stackoverflow.com/questions/3062205/setting-the-source-ip-for-a-udp-socket and
// * http://blog.powerdns.com/2012/10/08/on-binding-datagram-udp-sockets-to-the-any-addresses/
//
// Why do we need this: When listening on 0.0.0.0 with UDP so kernel decides what is the outgoing
// interface, this might not always be the correct one. This code will make sure the egress
// packet's interface matched the ingress' one.

import (
"net"
"syscall"
)

// setUDPSocketOptions4 prepares the v4 socket for sessions.
func setUDPSocketOptions4(conn *net.UDPConn) error {
file, err := conn.File()
if err != nil {
return err
}
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IP, syscall.IP_PKTINFO, 1); err != nil {
file.Close()
return err
}
// Calling File() above results in the connection becoming blocking, we must fix that.
// See https://github.com/miekg/dns/issues/279
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
file.Close()
return err
}
file.Close()
return nil
}

// setUDPSocketOptions6 prepares the v6 socket for sessions.
func setUDPSocketOptions6(conn *net.UDPConn) error {
file, err := conn.File()
if err != nil {
return err
}
if err := syscall.SetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_RECVPKTINFO, 1); err != nil {
file.Close()
return err
}
err = syscall.SetNonblock(int(file.Fd()), true)
if err != nil {
file.Close()
return err
}
file.Close()
return nil
}

// getUDPSocketOption6Only return true if the socket is v6 only and false when it is v4/v6 combined
// (dualstack).
func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) {
file, err := conn.File()
if err != nil {
return false, err
}
// dual stack. See http://stackoverflow.com/questions/1618240/how-to-support-both-ipv4-and-ipv6-connections
v6only, err := syscall.GetsockoptInt(int(file.Fd()), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY)
if err != nil {
file.Close()
return false, err
}
file.Close()
return v6only == 1, nil
}

func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) {
file, err := conn.File()
if err != nil {
return nil, err
}
defer file.Close()
return syscall.Getsockname(int(file.Fd()))
}
17 vendor/github.com/miekg/dns/udp_other.go (generated, vendored)
@@ -1,17 +0,0 @@
// +build !linux,!plan9

package dns

import (
"net"
"syscall"
)

// These do nothing. See udp_linux.go for an example of how to implement this.

// We tried to adhire to some kind of naming scheme.

func setUDPSocketOptions4(conn *net.UDPConn) error { return nil }
func setUDPSocketOptions6(conn *net.UDPConn) error { return nil }
func getUDPSocketOptions6Only(conn *net.UDPConn) (bool, error) { return false, nil }
func getUDPSocketName(conn *net.UDPConn) (syscall.Sockaddr, error) { return nil, nil }
34 vendor/github.com/miekg/dns/udp_plan9.go (generated, vendored)
@@ -1,34 +0,0 @@
package dns

import (
"net"
)

func setUDPSocketOptions(conn *net.UDPConn) error { return nil }

// SessionUDP holds the remote address and the associated
// out-of-band data.
type SessionUDP struct {
raddr *net.UDPAddr
context []byte
}

// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
oob := make([]byte, 40)
n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob)
if err != nil {
return n, nil, err
}
return n, &SessionUDP{raddr, oob[:oobn]}, err
}

// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, _, err := conn.WriteMsgUDP(b, session.context, session.raddr)
return n, err
}
19 vendor/github.com/miekg/dns/udp_windows.go (generated, vendored)
@@ -4,12 +4,17 @@ package dns

import "net"

// SessionUDP holds the remote address
type SessionUDP struct {
raddr *net.UDPAddr
}

// RemoteAddr returns the remote network address.
func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
// net.UDPAddr.
// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
n, raddr, err := conn.ReadFrom(b)
if err != nil {
@@ -19,16 +24,14 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
return n, session, err
}

// WriteToSessionUDP acts just like net.UDPConn.WritetTo(), but uses a *SessionUDP instead of a net.Addr.
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, err := conn.WriteTo(b, session.raddr)
return n, err
}

func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }

// setUDPSocketOptions sets the UDP socket options.
// This function is implemented on a per platform basis. See udp_*.go for more details
func setUDPSocketOptions(conn *net.UDPConn) error {
return nil
}
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
// use the standard method in udp.go for these.
func setUDPSocketOptions(*net.UDPConn) error { return nil }
func parseDstFromOOB([]byte, net.IP) net.IP { return nil }

15 vendor/github.com/miekg/dns/version.go (generated, vendored, new file)
@@ -0,0 +1,15 @@
package dns

import "fmt"

// Version is current version of this library.
var Version = V{1, 0, 4}

// V holds the version of this library.
type V struct {
Major, Minor, Patch int
}

func (v V) String() string {
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
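A quick illustration of the new version information; the output assumes the vendored copy shown above and an import of the dns package and fmt:

fmt.Println(dns.Version.String()) // "1.0.4" for the vendored copy above
// V has a value-receiver String method, so it also satisfies fmt.Stringer:
fmt.Println(dns.Version) // likewise prints "1.0.4"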
64 vendor/github.com/miekg/dns/xfr.go (generated, vendored)
@@ -1,6 +1,7 @@
package dns

import (
"fmt"
"time"
)

@@ -16,7 +17,7 @@ type Transfer struct {
DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds
ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds
WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be fully qualified
TsigSecret map[string]string // Secret(s) for Tsig map[<zonename>]<base64 secret>, zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2)
tsigTimersOnly bool
}

@@ -50,18 +51,18 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
env = make(chan *Envelope)
go func() {
if q.Question[0].Qtype == TypeAXFR {
go t.inAxfr(q.Id, env)
go t.inAxfr(q, env)
return
}
if q.Question[0].Qtype == TypeIXFR {
go t.inIxfr(q.Id, env)
go t.inIxfr(q, env)
return
}
}()
return env, nil
}

func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
first := true
defer t.Close()
defer close(c)
@@ -76,11 +77,15 @@ func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
c <- &Envelope{nil, err}
return
}
if id != in.Id {
if q.Id != in.Id {
c <- &Envelope{in.Answer, ErrId}
return
}
if first {
if in.Rcode != RcodeSuccess {
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
return
}
if !isSOAFirst(in) {
c <- &Envelope{in.Answer, ErrSoa}
return
@@ -105,9 +110,11 @@ func (t *Transfer) inAxfr(id uint16, c chan *Envelope) {
}
}

func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
serial := uint32(0) // The first serial seen is the current server serial
first := true
axfr := true
n := 0
qser := q.Ns[0].(*SOA).Serial
defer t.Close()
defer close(c)
timeout := dnsTimeout
@@ -121,17 +128,15 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
c <- &Envelope{nil, err}
return
}
if id != in.Id {
if q.Id != in.Id {
c <- &Envelope{in.Answer, ErrId}
return
}
if first {
// A single SOA RR signals "no changes"
if len(in.Answer) == 1 && isSOAFirst(in) {
c <- &Envelope{in.Answer, nil}
return
}

if in.Rcode != RcodeSuccess {
c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}}
return
}
if n == 0 {
// Check if the returned answer is ok
if !isSOAFirst(in) {
c <- &Envelope{in.Answer, ErrSoa}
@@ -139,21 +144,30 @@ func (t *Transfer) inIxfr(id uint16, c chan *Envelope) {
}
// This serial is important
serial = in.Answer[0].(*SOA).Serial
first = !first
// Check if there are no changes in zone
if qser >= serial {
c <- &Envelope{in.Answer, nil}
return
}
}

// Now we need to check each message for SOA records, to see what we need to do
if !first {
t.tsigTimersOnly = true
// If the last record in the IXFR contains the servers' SOA, we should quit
if v, ok := in.Answer[len(in.Answer)-1].(*SOA); ok {
t.tsigTimersOnly = true
for _, rr := range in.Answer {
if v, ok := rr.(*SOA); ok {
if v.Serial == serial {
c <- &Envelope{in.Answer, nil}
return
n++
// quit if it's a full axfr or the the servers' SOA is repeated the third time
if axfr && n == 2 || n == 3 {
c <- &Envelope{in.Answer, nil}
return
}
} else if axfr {
// it's an ixfr
axfr = false
}
}
c <- &Envelope{in.Answer, nil}
}
c <- &Envelope{in.Answer, nil}
}
}

@@ -242,3 +256,5 @@ func isSOALast(in *Msg) bool {
}
return false
}

const errXFR = "bad xfr rcode: %d"

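For orientation, a hedged sketch of driving the Transfer type whose internals changed above. The Msg.SetAxfr helper and the Envelope RR/Error fields come from the wider miekg/dns API and are assumed here, and the server address is a placeholder:

t := new(dns.Transfer)
m := new(dns.Msg)
m.SetAxfr("example.org.")
env, err := t.In(m, "ns1.example.org:53") // starts inAxfr in a goroutine
if err != nil { /* handle */ }
for e := range env {
	if e.Error != nil {
		break // e.g. ErrId when a reply's Id does not match the query, per the check above
	}
	for _, rr := range e.RR {
		fmt.Println(rr)
	}
}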
118 vendor/github.com/miekg/dns/zcompress.go (generated, vendored, new file)
@@ -0,0 +1,118 @@
// Code generated by "go run compress_generate.go"; DO NOT EDIT.

package dns

func compressionLenHelperType(c map[string]int, r RR) {
switch x := r.(type) {
case *AFSDB:
compressionLenHelper(c, x.Hostname)
case *CNAME:
compressionLenHelper(c, x.Target)
case *DNAME:
compressionLenHelper(c, x.Target)
case *HIP:
for i := range x.RendezvousServers {
compressionLenHelper(c, x.RendezvousServers[i])
}
case *KX:
compressionLenHelper(c, x.Exchanger)
case *LP:
compressionLenHelper(c, x.Fqdn)
case *MB:
compressionLenHelper(c, x.Mb)
case *MD:
compressionLenHelper(c, x.Md)
case *MF:
compressionLenHelper(c, x.Mf)
case *MG:
compressionLenHelper(c, x.Mg)
case *MINFO:
compressionLenHelper(c, x.Rmail)
compressionLenHelper(c, x.Email)
case *MR:
compressionLenHelper(c, x.Mr)
case *MX:
compressionLenHelper(c, x.Mx)
case *NAPTR:
compressionLenHelper(c, x.Replacement)
case *NS:
compressionLenHelper(c, x.Ns)
case *NSAPPTR:
compressionLenHelper(c, x.Ptr)
case *NSEC:
compressionLenHelper(c, x.NextDomain)
case *PTR:
compressionLenHelper(c, x.Ptr)
case *PX:
compressionLenHelper(c, x.Map822)
compressionLenHelper(c, x.Mapx400)
case *RP:
compressionLenHelper(c, x.Mbox)
compressionLenHelper(c, x.Txt)
case *RRSIG:
compressionLenHelper(c, x.SignerName)
case *RT:
compressionLenHelper(c, x.Host)
case *SIG:
compressionLenHelper(c, x.SignerName)
case *SOA:
compressionLenHelper(c, x.Ns)
compressionLenHelper(c, x.Mbox)
case *SRV:
compressionLenHelper(c, x.Target)
case *TALINK:
compressionLenHelper(c, x.PreviousName)
compressionLenHelper(c, x.NextName)
case *TKEY:
compressionLenHelper(c, x.Algorithm)
case *TSIG:
compressionLenHelper(c, x.Algorithm)
}
}

func compressionLenSearchType(c map[string]int, r RR) (int, bool) {
switch x := r.(type) {
case *AFSDB:
k1, ok1 := compressionLenSearch(c, x.Hostname)
return k1, ok1
case *CNAME:
k1, ok1 := compressionLenSearch(c, x.Target)
return k1, ok1
case *MB:
k1, ok1 := compressionLenSearch(c, x.Mb)
return k1, ok1
case *MD:
k1, ok1 := compressionLenSearch(c, x.Md)
return k1, ok1
case *MF:
k1, ok1 := compressionLenSearch(c, x.Mf)
return k1, ok1
case *MG:
k1, ok1 := compressionLenSearch(c, x.Mg)
return k1, ok1
case *MINFO:
k1, ok1 := compressionLenSearch(c, x.Rmail)
k2, ok2 := compressionLenSearch(c, x.Email)
return k1 + k2, ok1 && ok2
case *MR:
k1, ok1 := compressionLenSearch(c, x.Mr)
return k1, ok1
case *MX:
k1, ok1 := compressionLenSearch(c, x.Mx)
return k1, ok1
case *NS:
k1, ok1 := compressionLenSearch(c, x.Ns)
return k1, ok1
case *PTR:
k1, ok1 := compressionLenSearch(c, x.Ptr)
return k1, ok1
case *RT:
k1, ok1 := compressionLenSearch(c, x.Host)
return k1, ok1
case *SOA:
k1, ok1 := compressionLenSearch(c, x.Ns)
k2, ok2 := compressionLenSearch(c, x.Mbox)
return k1 + k2, ok1 && ok2
}
return 0, false
}
120 vendor/github.com/miekg/dns/zmsg.go (generated, vendored)
@@ -1,5 +1,4 @@
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from msg_generate.go
// Code generated by "go run msg_generate.go"; DO NOT EDIT.

package dns

@@ -61,6 +60,20 @@ func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bo
return off, nil
}

func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
off, err = packStringTxt(rr.Txt, msg, off)
if err != nil {
return off, err
}
rr.Header().Rdlength = uint16(off - headerEnd)
return off, nil
}

func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
@@ -175,6 +188,28 @@ func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress
return off, nil
}

func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
off, err = packUint32(rr.Serial, msg, off)
if err != nil {
return off, err
}
off, err = packUint16(rr.Flags, msg, off)
if err != nil {
return off, err
}
off, err = packDataNsec(rr.TypeBitMap, msg, off)
if err != nil {
return off, err
}
rr.Header().Rdlength = uint16(off - headerEnd)
return off, nil
}

func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
@@ -801,10 +836,12 @@ func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress
if err != nil {
return off, err
}
if rr.Salt == "-" { /* do nothing, empty salt */
}
if err != nil {
return off, err
// Only pack salt if value is not "-", i.e. empty
if rr.Salt != "-" {
off, err = packStringHex(rr.Salt, msg, off)
if err != nil {
return off, err
}
}
off, err = packUint8(rr.HashLength, msg, off)
if err != nil {
@@ -844,10 +881,12 @@ func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, comp
if err != nil {
return off, err
}
if rr.Salt == "-" { /* do nothing, empty salt */
}
if err != nil {
return off, err
// Only pack salt if value is not "-", i.e. empty
if rr.Salt != "-" {
off, err = packStringHex(rr.Salt, msg, off)
if err != nil {
return off, err
}
}
rr.Header().Rdlength = uint16(off - headerEnd)
return off, nil
@@ -1285,7 +1324,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b
if err != nil {
return off, err
}
off, err = packString(rr.Key, msg, off)
off, err = packStringHex(rr.Key, msg, off)
if err != nil {
return off, err
}
@@ -1293,7 +1332,7 @@ func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress b
if err != nil {
return off, err
}
off, err = packString(rr.OtherData, msg, off)
off, err = packStringHex(rr.OtherData, msg, off)
if err != nil {
return off, err
}
@@ -1524,6 +1563,23 @@ func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) {
return rr, off, err
}

func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) {
rr := new(AVC)
rr.Hdr = h
if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart

rr.Txt, off, err = unpackStringTxt(msg, off)
if err != nil {
return rr, off, err
}
return rr, off, err
}

func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) {
rr := new(CAA)
rr.Hdr = h
@@ -1686,6 +1742,37 @@ func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) {
return rr, off, err
}

func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) {
rr := new(CSYNC)
rr.Hdr = h
if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart

rr.Serial, off, err = unpackUint32(msg, off)
if err != nil {
return rr, off, err
}
if off == len(msg) {
return rr, off, nil
}
rr.Flags, off, err = unpackUint16(msg, off)
if err != nil {
return rr, off, err
}
if off == len(msg) {
return rr, off, nil
}
rr.TypeBitMap, off, err = unpackDataNsec(msg, off)
if err != nil {
return rr, off, err
}
return rr, off, err
}

func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) {
rr := new(DHCID)
rr.Hdr = h
@@ -3230,13 +3317,10 @@ func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
if off == len(msg) {
return rr, off, nil
}
rr.Key, off, err = unpackString(msg, off)
rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize))
if err != nil {
return rr, off, err
}
if off == len(msg) {
return rr, off, nil
}
rr.OtherLen, off, err = unpackUint16(msg, off)
if err != nil {
return rr, off, err
@@ -3244,7 +3328,7 @@ func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) {
if off == len(msg) {
return rr, off, nil
}
rr.OtherData, off, err = unpackString(msg, off)
rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen))
if err != nil {
return rr, off, err
}
@@ -3463,11 +3547,13 @@ var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){
TypeAAAA: unpackAAAA,
TypeAFSDB: unpackAFSDB,
TypeANY: unpackANY,
TypeAVC: unpackAVC,
TypeCAA: unpackCAA,
TypeCDNSKEY: unpackCDNSKEY,
TypeCDS: unpackCDS,
TypeCERT: unpackCERT,
TypeCNAME: unpackCNAME,
TypeCSYNC: unpackCSYNC,
TypeDHCID: unpackDHCID,
TypeDLV: unpackDLV,
TypeDNAME: unpackDNAME,

97 vendor/github.com/miekg/dns/ztypes.go (generated, vendored)
@@ -1,5 +1,4 @@
// *** DO NOT MODIFY ***
// AUTOGENERATED BY go generate from type_generate.go
// Code generated by "go run types_generate.go"; DO NOT EDIT.

package dns

@@ -14,11 +13,13 @@ var TypeToRR = map[uint16]func() RR{
TypeAAAA: func() RR { return new(AAAA) },
TypeAFSDB: func() RR { return new(AFSDB) },
TypeANY: func() RR { return new(ANY) },
TypeAVC: func() RR { return new(AVC) },
TypeCAA: func() RR { return new(CAA) },
TypeCDNSKEY: func() RR { return new(CDNSKEY) },
TypeCDS: func() RR { return new(CDS) },
TypeCERT: func() RR { return new(CERT) },
TypeCNAME: func() RR { return new(CNAME) },
TypeCSYNC: func() RR { return new(CSYNC) },
TypeDHCID: func() RR { return new(DHCID) },
TypeDLV: func() RR { return new(DLV) },
TypeDNAME: func() RR { return new(DNAME) },
@@ -86,12 +87,14 @@ var TypeToString = map[uint16]string{
TypeAFSDB: "AFSDB",
TypeANY: "ANY",
TypeATMA: "ATMA",
TypeAVC: "AVC",
TypeAXFR: "AXFR",
TypeCAA: "CAA",
TypeCDNSKEY: "CDNSKEY",
TypeCDS: "CDS",
TypeCERT: "CERT",
TypeCNAME: "CNAME",
TypeCSYNC: "CSYNC",
TypeDHCID: "DHCID",
TypeDLV: "DLV",
TypeDNAME: "DNAME",
@@ -161,16 +164,17 @@ var TypeToString = map[uint16]string{
TypeNSAPPTR: "NSAP-PTR",
}

// Header() functions
func (rr *A) Header() *RR_Header { return &rr.Hdr }
func (rr *AAAA) Header() *RR_Header { return &rr.Hdr }
func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr }
func (rr *ANY) Header() *RR_Header { return &rr.Hdr }
func (rr *AVC) Header() *RR_Header { return &rr.Hdr }
func (rr *CAA) Header() *RR_Header { return &rr.Hdr }
func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *CDS) Header() *RR_Header { return &rr.Hdr }
func (rr *CERT) Header() *RR_Header { return &rr.Hdr }
func (rr *CNAME) Header() *RR_Header { return &rr.Hdr }
func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr }
func (rr *DHCID) Header() *RR_Header { return &rr.Hdr }
func (rr *DLV) Header() *RR_Header { return &rr.Hdr }
func (rr *DNAME) Header() *RR_Header { return &rr.Hdr }
@@ -252,9 +256,16 @@ func (rr *ANY) len() int {
l := rr.Hdr.len()
return l
}
func (rr *AVC) len() int {
l := rr.Hdr.len()
for _, x := range rr.Txt {
l += len(x) + 1
}
return l
}
func (rr *CAA) len() int {
l := rr.Hdr.len()
l += 1 // Flag
l++ // Flag
l += len(rr.Tag) + 1
l += len(rr.Value)
return l
@@ -263,7 +274,7 @@ func (rr *CERT) len() int {
l := rr.Hdr.len()
l += 2 // Type
l += 2 // KeyTag
l += 1 // Algorithm
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l
}
@@ -285,16 +296,16 @@ func (rr *DNAME) len() int {
func (rr *DNSKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
l += 1 // Protocol
l += 1 // Algorithm
l++ // Protocol
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
func (rr *DS) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
l += 1 // Algorithm
l += 1 // DigestType
l++ // Algorithm
l++ // DigestType
l += len(rr.Digest)/2 + 1
return l
}
@@ -333,10 +344,10 @@ func (rr *HINFO) len() int {
}
func (rr *HIP) len() int {
l := rr.Hdr.len()
l += 1 // HitLength
l += 1 // PublicKeyAlgorithm
l++ // HitLength
l++ // PublicKeyAlgorithm
l += 2 // PublicKeyLength
l += len(rr.Hit)/2 + 1
l += len(rr.Hit) / 2
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
for _, x := range rr.RendezvousServers {
l += len(x) + 1
@@ -363,10 +374,10 @@ func (rr *L64) len() int {
}
func (rr *LOC) len() int {
l := rr.Hdr.len()
l += 1 // Version
l += 1 // Size
l += 1 // HorizPre
l += 1 // VertPre
l++ // Version
l++ // Size
l++ // HorizPre
l++ // VertPre
l += 4 // Latitude
l += 4 // Longitude
l += 4 // Altitude
@@ -455,11 +466,11 @@ func (rr *NSAPPTR) len() int {
}
func (rr *NSEC3PARAM) len() int {
l := rr.Hdr.len()
l += 1 // Hash
l += 1 // Flags
l++ // Hash
l++ // Flags
l += 2 // Iterations
l += 1 // SaltLength
l += len(rr.Salt)/2 + 1
l++ // SaltLength
l += len(rr.Salt) / 2
return l
}
func (rr *OPENPGPKEY) len() int {
@@ -487,8 +498,8 @@ func (rr *RFC3597) len() int {
func (rr *RKEY) len() int {
l := rr.Hdr.len()
l += 2 // Flags
l += 1 // Protocol
l += 1 // Algorithm
l++ // Protocol
l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l
}
@@ -501,8 +512,8 @@ func (rr *RP) len() int {
func (rr *RRSIG) len() int {
l := rr.Hdr.len()
l += 2 // TypeCovered
l += 1 // Algorithm
l += 1 // Labels
l++ // Algorithm
l++ // Labels
l += 4 // OrigTtl
l += 4 // Expiration
l += 4 // Inception
@@ -519,9 +530,9 @@ func (rr *RT) len() int {
}
func (rr *SMIMEA) len() int {
l := rr.Hdr.len()
l += 1 // Usage
l += 1 // Selector
l += 1 // MatchingType
l++ // Usage
l++ // Selector
l++ // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
@@ -553,16 +564,16 @@ func (rr *SRV) len() int {
}
func (rr *SSHFP) len() int {
l := rr.Hdr.len()
l += 1 // Algorithm
l += 1 // Type
l++ // Algorithm
l++ // Type
l += len(rr.FingerPrint)/2 + 1
return l
}
func (rr *TA) len() int {
l := rr.Hdr.len()
l += 2 // KeyTag
l += 1 // Algorithm
l += 1 // DigestType
l++ // Algorithm
l++ // DigestType
l += len(rr.Digest)/2 + 1
return l
}
@@ -580,16 +591,16 @@ func (rr *TKEY) len() int {
l += 2 // Mode
l += 2 // Error
l += 2 // KeySize
l += len(rr.Key) + 1
l += len(rr.Key) / 2
l += 2 // OtherLen
l += len(rr.OtherData) + 1
l += len(rr.OtherData) / 2
return l
}
func (rr *TLSA) len() int {
l := rr.Hdr.len()
l += 1 // Usage
l += 1 // Selector
l += 1 // MatchingType
l++ // Usage
l++ // Selector
l++ // MatchingType
l += len(rr.Certificate)/2 + 1
return l
}
@@ -599,11 +610,11 @@ func (rr *TSIG) len() int {
l += 6 // TimeSigned
l += 2 // Fudge
l += 2 // MACSize
l += len(rr.MAC)/2 + 1
l += len(rr.MAC) / 2
l += 2 // OrigId
l += 2 // Error
l += 2 // OtherLen
l += len(rr.OtherData)/2 + 1
l += len(rr.OtherData) / 2
return l
}
func (rr *TXT) len() int {
@@ -649,6 +660,11 @@ func (rr *AFSDB) copy() RR {
func (rr *ANY) copy() RR {
return &ANY{*rr.Hdr.copyHeader()}
}
func (rr *AVC) copy() RR {
Txt := make([]string, len(rr.Txt))
copy(Txt, rr.Txt)
return &AVC{*rr.Hdr.copyHeader(), Txt}
}
func (rr *CAA) copy() RR {
return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value}
}
@@ -658,6 +674,11 @@ func (rr *CERT) copy() RR {
func (rr *CNAME) copy() RR {
return &CNAME{*rr.Hdr.copyHeader(), rr.Target}
}
func (rr *CSYNC) copy() RR {
TypeBitMap := make([]uint16, len(rr.TypeBitMap))
copy(TypeBitMap, rr.TypeBitMap)
return &CSYNC{*rr.Hdr.copyHeader(), rr.Serial, rr.Flags, TypeBitMap}
}
func (rr *DHCID) copy() RR {
return &DHCID{*rr.Hdr.copyHeader(), rr.Digest}
}

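As a small illustration of the generated lookup maps extended above (the AVC and CSYNC entries are the new ones); imports of the dns package and fmt are assumed:

fmt.Println(dns.TypeToString[dns.TypeCSYNC]) // "CSYNC"
rr := dns.TypeToRR[dns.TypeAVC]()            // returns a fresh, empty *dns.AVC
fmt.Printf("%T\n", rr)                       // "*dns.AVC"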
7 vendor/golang.org/x/crypto/bcrypt/bcrypt.go (generated, vendored)
@@ -12,9 +12,10 @@ import (
"crypto/subtle"
"errors"
"fmt"
"golang.org/x/crypto/blowfish"
"io"
"strconv"

"golang.org/x/crypto/blowfish"
)

const (
@@ -205,7 +206,6 @@ func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) {
}

func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) {

csalt, err := base64Decode(salt)
if err != nil {
return nil, err
@@ -213,7 +213,8 @@ func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cip

// Bug compatibility with C bcrypt implementations. They use the trailing
// NULL in the key string during expansion.
ckey := append(key, 0)
// We copy the key to prevent changing the underlying array.
ckey := append(key[:len(key):len(key)], 0)

c, err := blowfish.NewSaltedCipher(ckey, csalt)
if err != nil {

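The bcrypt change above swaps append(key, 0) for a three-index slice expression so the appended NUL byte can never land in the caller's backing array. A standalone sketch of the difference (variable names are illustrative only):

key := make([]byte, 3, 8)                      // len 3, spare capacity 5
aliased := append(key, 0)                      // may write the 0 into key's backing array
safe := append(key[:len(key):len(key)], 0)     // capacity pinned to len, so append must allocate a copy
_, _ = aliased, safe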
4 vendor/golang.org/x/crypto/blowfish/cipher.go (generated, vendored)
@@ -6,7 +6,7 @@
package blowfish // import "golang.org/x/crypto/blowfish"

// The code is a port of Bruce Schneier's C implementation.
// See http://www.schneier.com/blowfish.html.
// See https://www.schneier.com/blowfish.html.

import "strconv"

@@ -39,7 +39,7 @@ func NewCipher(key []byte) (*Cipher, error) {

// NewSaltedCipher creates a returns a Cipher that folds a salt into its key
// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is
// sufficient and desirable. For bcrypt compatiblity, the key can be over 56
// sufficient and desirable. For bcrypt compatibility, the key can be over 56
// bytes.
func NewSaltedCipher(key, salt []byte) (*Cipher, error) {
if len(salt) == 0 {

Some files were not shown because too many files have changed in this diff.