From ba99fbe39050a11f9e62d398321c7a32f7842324 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Doumenjou Date: Tue, 16 Oct 2018 11:00:04 +0200 Subject: [PATCH 01/29] Fix certificate insertion loop to keep valid certificate and ignore the bad one --- integration/fixtures/https/dynamic_https.toml | 15 +++++++++++++++ server/server_configuration.go | 11 ++++------- tls/certificate.go | 11 +++++++++++ tls/tls.go | 14 +++++--------- 4 files changed, 35 insertions(+), 16 deletions(-) diff --git a/integration/fixtures/https/dynamic_https.toml b/integration/fixtures/https/dynamic_https.toml index a97bee293..264552b02 100644 --- a/integration/fixtures/https/dynamic_https.toml +++ b/integration/fixtures/https/dynamic_https.toml @@ -18,6 +18,21 @@ [frontends.frontend2.routes.test_2] rule = "Host:snitest.org" +[[tls]] +entryPoints = ["https"] + # bad certificates to validate the loop on the certificate appending + [tls.certificate] + # bad content + certFile = """-----BEGIN CERTIFICATE----- +MIIC/zCCAeegAwIBAgIJALAYHG/vGqWEMA0GCSqGSIb3DQEBBQUAMBYxFDASBgNV +-----END CERTIFICATE-----""" + # bad content + keyFile = """-----BEGIN RSA PRIVATE KEY----- +wihZ13e3i5UQEYuoRcH1RUd1wyYoBSKuQnsT2WwVZ1wlXSYaELAbQgaI9NtfBA0G +eRG3DaVpez4DQVupZDHMgxJUYqqKynUj6GD1YiaxGROj3TYCu6e7OxyhalhCllSu +w/X5M802XqzLjeec5zHoZDfknnAkgR9MsxZYmZPFaDyL6GOKUB8= +-----END RSA PRIVATE KEY-----""" + [[tls]] entryPoints = ["https"] [tls.certificate] diff --git a/server/server_configuration.go b/server/server_configuration.go index 164048bcd..3d224e1c4 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -118,8 +118,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura // Get new certificates list sorted per entrypoints // Update certificates - entryPointsCertificates, err := s.loadHTTPSConfiguration(configurations, globalConfiguration.DefaultEntryPoints) - // FIXME error management + entryPointsCertificates := s.loadHTTPSConfiguration(configurations, 
globalConfiguration.DefaultEntryPoints) // Sort routes and update certificates for serverEntryPointName, serverEntryPoint := range serverEntryPoints { @@ -558,17 +557,15 @@ func (s *Server) postLoadConfiguration() { } // loadHTTPSConfiguration add/delete HTTPS certificate managed dynamically -func (s *Server) loadHTTPSConfiguration(configurations types.Configurations, defaultEntryPoints configuration.DefaultEntryPoints) (map[string]map[string]*tls.Certificate, error) { +func (s *Server) loadHTTPSConfiguration(configurations types.Configurations, defaultEntryPoints configuration.DefaultEntryPoints) map[string]map[string]*tls.Certificate { newEPCertificates := make(map[string]map[string]*tls.Certificate) // Get all certificates for _, config := range configurations { if config.TLS != nil && len(config.TLS) > 0 { - if err := traefiktls.SortTLSPerEntryPoints(config.TLS, newEPCertificates, defaultEntryPoints); err != nil { - return nil, err - } + traefiktls.SortTLSPerEntryPoints(config.TLS, newEPCertificates, defaultEntryPoints) } } - return newEPCertificates, nil + return newEPCertificates } func (s *Server) buildServerEntryPoints() map[string]*serverEntryPoint { diff --git a/tls/certificate.go b/tls/certificate.go index f680c78f5..47b2a10eb 100644 --- a/tls/certificate.go +++ b/tls/certificate.go @@ -196,6 +196,17 @@ func (c *Certificate) AppendCertificates(certs map[string]map[string]*tls.Certif return err } +func (c *Certificate) getTruncatedCertificateName() string { + certName := c.CertFile.String() + + // Truncate certificate information only if it's a well formed certificate content with more than 50 characters + if !c.CertFile.IsPath() && strings.HasPrefix(certName, certificateHeader) && len(certName) > len(certificateHeader)+50 { + certName = strings.TrimPrefix(c.CertFile.String(), certificateHeader)[:50] + } + + return certName +} + // String is the method to format the flag's value, part of the flag.Value interface. 
// The String method's output will be used in diagnostics. func (c *Certificates) String() string { diff --git a/tls/tls.go b/tls/tls.go index 32a7583de..ea56dc8ab 100644 --- a/tls/tls.go +++ b/tls/tls.go @@ -80,27 +80,23 @@ func (r *FilesOrContents) Type() string { } // SortTLSPerEntryPoints converts TLS configuration sorted by Certificates into TLS configuration sorted by EntryPoints -func SortTLSPerEntryPoints(configurations []*Configuration, epConfiguration map[string]map[string]*tls.Certificate, defaultEntryPoints []string) error { +func SortTLSPerEntryPoints(configurations []*Configuration, epConfiguration map[string]map[string]*tls.Certificate, defaultEntryPoints []string) { if epConfiguration == nil { epConfiguration = make(map[string]map[string]*tls.Certificate) } for _, conf := range configurations { if conf.EntryPoints == nil || len(conf.EntryPoints) == 0 { if log.GetLevel() >= logrus.DebugLevel { - certName := conf.Certificate.CertFile.String() - // Truncate certificate information only if it's a well formed certificate content with more than 50 characters - if !conf.Certificate.CertFile.IsPath() && strings.HasPrefix(conf.Certificate.CertFile.String(), certificateHeader) && len(conf.Certificate.CertFile.String()) > len(certificateHeader)+50 { - certName = strings.TrimPrefix(conf.Certificate.CertFile.String(), certificateHeader)[:50] - } - log.Debugf("No entryPoint is defined to add the certificate %s, it will be added to the default entryPoints: %s", certName, strings.Join(defaultEntryPoints, ", ")) + log.Debugf("No entryPoint is defined to add the certificate %s, it will be added to the default entryPoints: %s", + conf.Certificate.getTruncatedCertificateName(), + strings.Join(defaultEntryPoints, ", ")) } conf.EntryPoints = append(conf.EntryPoints, defaultEntryPoints...) 
} for _, ep := range conf.EntryPoints { if err := conf.Certificate.AppendCertificates(epConfiguration, ep); err != nil { - return err + log.Errorf("Unable to append certificate %s to entrypoint %s: %v", conf.Certificate.getTruncatedCertificateName(), ep, err) } } } - return nil } From 70fa42aee0b04396592809f5b0d8fdf2736a1310 Mon Sep 17 00:00:00 2001 From: Michael Date: Tue, 16 Oct 2018 18:12:03 +0200 Subject: [PATCH 02/29] Improve maintainer documentation --- MAINTAINER.md | 61 ++++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/MAINTAINER.md b/MAINTAINER.md index 92771a6a9..68c90160e 100644 --- a/MAINTAINER.md +++ b/MAINTAINER.md @@ -18,8 +18,8 @@ ## PR review process: * The status `needs-design-review` is only used in complex/heavy/tricky PRs. -* From `1` to `2`: 1 design LGTM in comment, by a senior maintainer, if needed. -* From `2` to `3`: 3 LGTM by any maintainer. +* From `1` to `2`: 1 comment that says “design LGTM” (by a senior maintainer). +* From `2` to `3`: 3 LGTM approvals by any maintainer. * If needed, a specific maintainer familiar with a particular domain can be requested for the review. We use [PRM](https://github.com/ldez/prm) to manage locally pull requests. @@ -34,20 +34,21 @@ We use [PRM](https://github.com/ldez/prm) to manage locally pull requests. The maintainer giving the final LGTM must add the `status/3-needs-merge` label to trigger the merge bot. By default, a squash-rebase merge will be carried out. -If you want to preserve commits you must add `bot/merge-method-rebase` before `status/3-needs-merge`. +To preserve commits, add `bot/merge-method-rebase` before `status/3-needs-merge`. -The status `status/4-merge-in-progress` is only for the bot. +The status `status/4-merge-in-progress` is only used by the bot. If the bot is not able to perform the merge, the label `bot/need-human-merge` is added. -In this case you must solve conflicts/CI/... 
and after you only need to remove `bot/need-human-merge`. +In such a situation, solve the conflicts/CI/... and then remove the label `bot/need-human-merge`. -A maintainer can add `bot/no-merge` on a PR if he want (temporarily) prevent a merge by the bot. +To prevent the bot from automatically merging a PR, add the label `bot/no-merge`. -`bot/light-review` can be used to decrease required LGTM from 3 to 1 when: +The label `bot/light-review` decreases the number of required LGTM from 3 to 1. -- vendor updates from previously reviewed PRs -- merges branches into master -- prepare release +This label is used when: +- Updating the vendors from previously reviewed PRs +- Merging branches into the master +- Preparing the release ### [Myrmica Bibikoffi](https://github.com/containous/bibikoffi/) @@ -68,7 +69,7 @@ A maintainer can add `bot/no-merge` on a PR if he want (temporarily) prevent a m ## Labels -If we open/look an issue/PR, we must add a `kind/*`, an `area/*` and a `status/*`. +A maintainer that looks at an issue/PR must define its `kind/*`, `area/*`, and `status/*`. ### Contributor @@ -80,19 +81,19 @@ If we open/look an issue/PR, we must add a `kind/*`, an `area/*` and a `status/* ### Kind * `kind/enhancement`: a new or improved feature. -* `kind/question`: It's a question. **(only for issue)** -* `kind/proposal`: proposal PR/issues need a public debate. - * _Proposal issues_ are design proposal that need to be refined with multiple contributors. +* `kind/question`: a question. **(only for issue)** +* `kind/proposal`: a proposal that needs to be discussed. + * _Proposal issues_ are design proposals * _Proposal PRs_ are technical prototypes that need to be refined with multiple contributors. -* `kind/bug/possible`: if we need to analyze to understand if it's a bug or not. **(only for issues)** -* `kind/bug/confirmed`: we are sure, it's a bug. **(only for issues)** -* `kind/bug/fix`: it's a bug fix. 
**(only for PR)** +* `kind/bug/possible`: a possible bug that needs analysis before it is confirmed or fixed. **(only for issues)** +* `kind/bug/confirmed`: a confirmed bug (reproducible). **(only for issues)** +* `kind/bug/fix`: a bug fix. **(only for PR)** ### Resolution -* `resolution/duplicate`: it's a duplicate issue/PR. -* `resolution/declined`: Rule #1 of open-source: no is temporary, yes is forever. +* `resolution/duplicate`: a duplicate issue/PR. +* `resolution/declined`: declined (Rule #1 of open-source: no is temporary, yes is forever). * `WIP`: Work In Progress. **(only for PR)** ### Platform @@ -105,10 +106,10 @@ If we open/look an issue/PR, we must add a `kind/*`, an `area/*` and a `status/* * `area/api`: Traefik API related. * `area/authentication`: Authentication related. * `area/cluster`: Traefik clustering related. -* `area/documentation`: regards improving/adding documentation. -* `area/infrastructure`: related to CI or Traefik building scripts. +* `area/documentation`: Documentation related. +* `area/infrastructure`: CI or Traefik building scripts related. * `area/healthcheck`: Health-check related. -* `area/logs`: Traefik logs related. +* `area/logs`: Logs related. * `area/middleware`: Middleware related. * `area/middleware/metrics`: Metrics related. (Prometheus, StatsD, ...) * `area/oxy`: Oxy related. @@ -132,23 +133,23 @@ If we open/look an issue/PR, we must add a `kind/*`, an `area/*` and a `status/* ### Priority -* `priority/P0`: needs hot fix. **(only for issue)** -* `priority/P1`: need to be fixed in next release. **(only for issue)** -* `priority/P2`: need to be fixed in the future. **(only for issue)** +* `priority/P0`: needs a hot fix. **(only for issue)** +* `priority/P1`: needs to be fixed the next release. **(only for issue)** +* `priority/P2`: needs to be fixed in the future. **(only for issue)** * `priority/P3`: maybe. **(only for issue)** ### PR size * `size/S`: small PR. **(only for PR)** _[bot only]_ * `size/M`: medium PR. 
**(only for PR)** _[bot only]_ -* `size/L`: Large PR. **(only for PR)** _[bot only]_ +* `size/L`: large PR. **(only for PR)** _[bot only]_ ### Status - Workflow The `status/*` labels represent the desired state in the workflow. -* `status/0-needs-triage`: all new issue or PR have this status. _[bot only]_ -* `status/1-needs-design-review`: need a design review. **(only for PR)** -* `status/2-needs-review`: need a code/documentation review. **(only for PR)** +* `status/0-needs-triage`: all the new issues and PRs have this status. _[bot only]_ +* `status/1-needs-design-review`: needs a design review. **(only for PR)** +* `status/2-needs-review`: needs a code/documentation review. **(only for PR)** * `status/3-needs-merge`: ready to merge. **(only for PR)** -* `status/4-merge-in-progress`: merge in progress. _[bot only]_ +* `status/4-merge-in-progress`: merge is in progress. _[bot only]_ From 95d86d84b456fbf104aa8f567cd86013c4213c86 Mon Sep 17 00:00:00 2001 From: SALLEYRON Julien Date: Wed, 17 Oct 2018 14:22:03 +0200 Subject: [PATCH 03/29] Add keepTrailingSlash option --- configuration/configuration.go | 1 + docs/configuration/commons.md | 26 ++++++++++ integration/basic_test.go | 48 +++++++++++++++++++ integration/fixtures/keep_trailing_slash.toml | 23 +++++++++ server/server.go | 2 +- server/server_configuration.go | 2 +- 6 files changed, 100 insertions(+), 2 deletions(-) create mode 100644 integration/fixtures/keep_trailing_slash.toml diff --git a/configuration/configuration.go b/configuration/configuration.go index 9d1a67865..026af61d1 100644 --- a/configuration/configuration.go +++ b/configuration/configuration.go @@ -87,6 +87,7 @@ type GlobalConfiguration struct { RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance" export:"true"` ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers" export:"true"` AllowMinWeightZero bool `description:"Allow weight to 
take 0 as minimum real value." export:"true"` // Deprecated + KeepTrailingSlash bool `description:"Do not remove trailing slash." export:"true"` // Deprecated Web *WebCompatibility `description:"(Deprecated) Enable Web backend with default settings" export:"true"` // Deprecated Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"` File *file.Provider `description:"Enable File backend with default settings" export:"true"` diff --git a/docs/configuration/commons.md b/docs/configuration/commons.md index 082281892..4f59d698f 100644 --- a/docs/configuration/commons.md +++ b/docs/configuration/commons.md @@ -33,6 +33,13 @@ # # checkNewVersion = false +# Tells traefik whether it should keep the trailing slashes in the paths (e.g. /paths/) or redirect to the no trailing slash paths instead (/paths). +# +# Optional +# Default: false +# +# keepTrailingSlash = false + # Providers throttle duration. # # Optional @@ -103,6 +110,25 @@ If you encounter 'too many open files' errors, you can either increase this valu - `defaultEntryPoints`: Entrypoints to be used by frontends that do not specify any entrypoint. Each frontend can specify its own entrypoints. +- `keepTrailingSlash`: Tells Træfik whether it should keep the trailing slashes that might be present in the paths of incoming requests (true), or if it should redirect to the slashless version of the URL (default behavior: false) + +!!! note + Beware that the value of `keepTrailingSlash` can have a significant impact on the way your frontend rules are interpreted. + The table below tries to sum up several behaviors depending on requests/configurations. + The current default behavior is deprecated and kept for compatibility reasons. + As a consequence, we encourage you to set `keepTrailingSlash` to true. 
+ + | Incoming request | keepTrailingSlash | Path:{value} | Behavior + |----------------------|-------------------|--------------|----------------------------| + | http://foo.com/path/ | false | Path:/path/ | Proceeds with the request | + | http://foo.com/path/ | false | Path:/path | 301 to http://foo.com/path | + | http://foo.com/path | false | Path:/path/ | Proceeds with the request | + | http://foo.com/path | false | Path:/path | Proceeds with the request | + | http://foo.com/path/ | true | Path:/path/ | Proceeds with the request | + | http://foo.com/path/ | true | Path:/path | 404 | + | http://foo.com/path | true | Path:/path/ | 404 | + | http://foo.com/path | true | Path:/path | Proceeds with the request | + ## Constraints diff --git a/integration/basic_test.go b/integration/basic_test.go index a576cea46..2263d7f11 100644 --- a/integration/basic_test.go +++ b/integration/basic_test.go @@ -396,3 +396,51 @@ func (s *SimpleSuite) TestMultipleProviderSameBackendName(c *check.C) { c.Assert(err, checker.IsNil) } + +func (s *SimpleSuite) TestDontKeepTrailingSlash(c *check.C) { + file := s.adaptFile(c, "fixtures/keep_trailing_slash.toml", struct { + KeepTrailingSlash bool + }{false}) + defer os.Remove(file) + + cmd, output := s.traefikCmd(withConfigFile(file)) + defer output(c) + + err := cmd.Start() + c.Assert(err, checker.IsNil) + defer cmd.Process.Kill() + + oldCheckRedirect := http.DefaultClient.CheckRedirect + http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + err = try.GetRequest("http://127.0.0.1:8000/test/foo/", 1*time.Second, try.StatusCodeIs(http.StatusMovedPermanently)) + c.Assert(err, checker.IsNil) + + http.DefaultClient.CheckRedirect = oldCheckRedirect +} + +func (s *SimpleSuite) TestKeepTrailingSlash(c *check.C) { + file := s.adaptFile(c, "fixtures/keep_trailing_slash.toml", struct { + KeepTrailingSlash bool + }{true}) + defer os.Remove(file) + + cmd, output := 
s.traefikCmd(withConfigFile(file)) + defer output(c) + + err := cmd.Start() + c.Assert(err, checker.IsNil) + defer cmd.Process.Kill() + + oldCheckRedirect := http.DefaultClient.CheckRedirect + http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + } + + err = try.GetRequest("http://127.0.0.1:8000/test/foo/", 1*time.Second, try.StatusCodeIs(http.StatusNotFound)) + c.Assert(err, checker.IsNil) + + http.DefaultClient.CheckRedirect = oldCheckRedirect +} diff --git a/integration/fixtures/keep_trailing_slash.toml b/integration/fixtures/keep_trailing_slash.toml new file mode 100644 index 000000000..9d9079814 --- /dev/null +++ b/integration/fixtures/keep_trailing_slash.toml @@ -0,0 +1,23 @@ +defaultEntryPoints = ["http"] + +keepTrailingSlash = {{ .KeepTrailingSlash }} +[entryPoints] + [entryPoints.http] + address = ":8000" + +logLevel = "DEBUG" + +[file] + +# rules +[backends] + [backends.backend1] + [backends.backend1.servers.server1] + url = "http://172.17.0.2:80" + weight = 1 + +[frontends] + [frontends.frontend1] + backend = "backend1" + [frontends.frontend1.routes.test_1] + rule = "Path:/test/foo" diff --git a/server/server.go b/server/server.go index 74c3beeb4..a4ab46b37 100644 --- a/server/server.go +++ b/server/server.go @@ -627,7 +627,7 @@ func buildProxyProtocolListener(entryPoint *configuration.EntryPoint, listener n func (s *Server) buildInternalRouter(entryPointName string) *mux.Router { internalMuxRouter := mux.NewRouter() - internalMuxRouter.StrictSlash(true) + internalMuxRouter.StrictSlash(!s.globalConfiguration.KeepTrailingSlash) internalMuxRouter.SkipClean(true) if entryPoint, ok := s.entryPoints[entryPointName]; ok && entryPoint.InternalRouter != nil { diff --git a/server/server_configuration.go b/server/server_configuration.go index 3d224e1c4..2646505f8 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -633,7 +633,7 @@ func 
buildDefaultCertificate(defaultCertificate *traefiktls.Certificate) (*tls.C func (s *Server) buildDefaultHTTPRouter() *mux.Router { rt := mux.NewRouter() rt.NotFoundHandler = s.wrapHTTPHandlerWithAccessLog(http.HandlerFunc(http.NotFound), "backend not found") - rt.StrictSlash(true) + rt.StrictSlash(!s.globalConfiguration.KeepTrailingSlash) rt.SkipClean(true) return rt } From e6a88f3531564ebca418b9354da87648300da2e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A9rald=20Cro=C3=ABs?= Date: Wed, 17 Oct 2018 16:24:04 +0200 Subject: [PATCH 04/29] Uses ASCII characters to spell Traefik --- CONTRIBUTING.md | 8 +- README.md | 30 ++++---- docs/basics.md | 38 +++++----- docs/configuration/acme.md | 14 ++-- docs/configuration/api.md | 12 +-- docs/configuration/backends/boltdb.md | 2 +- docs/configuration/backends/consul.md | 2 +- docs/configuration/backends/consulcatalog.md | 8 +- docs/configuration/backends/docker.md | 14 ++-- docs/configuration/backends/dynamodb.md | 2 +- docs/configuration/backends/ecs.md | 6 +- docs/configuration/backends/etcd.md | 2 +- docs/configuration/backends/eureka.md | 2 +- docs/configuration/backends/file.md | 24 +++--- docs/configuration/backends/kubernetes.md | 4 +- docs/configuration/backends/marathon.md | 4 +- docs/configuration/backends/mesos.md | 4 +- docs/configuration/backends/rancher.md | 4 +- docs/configuration/backends/rest.md | 2 +- docs/configuration/backends/servicefabric.md | 10 +-- docs/configuration/backends/web.md | 16 ++-- docs/configuration/backends/zookeeper.md | 2 +- docs/configuration/commons.md | 6 +- docs/configuration/entrypoints.md | 6 +- docs/configuration/logs.md | 2 +- docs/configuration/ping.md | 2 +- docs/configuration/tracing.md | 4 +- docs/index.md | 56 +++++++------- docs/user-guide/cluster-docker-consul.md | 26 +++---- docs/user-guide/cluster.md | 18 ++--- docs/user-guide/docker-and-lets-encrypt.md | 50 ++++++------ docs/user-guide/examples.md | 14 ++-- docs/user-guide/grpc.md | 12 +-- 
docs/user-guide/kubernetes.md | 80 ++++++++++---------- docs/user-guide/kv-config.md | 48 ++++++------ docs/user-guide/swarm-mode.md | 24 +++--- docs/user-guide/swarm.md | 8 +- examples/quickstart/README.md | 32 ++++---- examples/quickstart/docker-compose.yml | 4 +- integration/consul_test.go | 2 +- integration/etcd3_test.go | 4 +- integration/etcd_test.go | 10 +-- middlewares/accesslog/logger_formatters.go | 4 +- mkdocs.yml | 4 +- provider/docker/docker.go | 2 +- provider/label/label.go | 2 +- server/server_middlewares.go | 4 +- webui/readme.md | 6 +- 48 files changed, 320 insertions(+), 320 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 980f06153..ca87b0b48 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -32,7 +32,7 @@ traefik* ##### Setting up your `go` environment - You need `go` v1.9+ -- It is recommended you clone Træfik into a directory like `~/go/src/github.com/containous/traefik` (This is the official golang workspace hierarchy, and will allow dependencies to resolve properly) +- It is recommended you clone Traefik into a directory like `~/go/src/github.com/containous/traefik` (This is the official golang workspace hierarchy, and will allow dependencies to resolve properly) - Set your `GOPATH` and `PATH` variable to be set to `~/go` via: ```bash @@ -56,9 +56,9 @@ GORACE="" ## more go env's will be listed ``` -##### Build Træfik +##### Build Traefik -Once your environment is set up and the Træfik repository cloned you can build Træfik. You need get `go-bindata` once to be able to use `go generate` command as part of the build. The steps to build are: +Once your environment is set up and the Traefik repository cloned you can build Traefik. You need get `go-bindata` once to be able to use `go generate` command as part of the build. 
The steps to build are: ```bash cd ~/go/src/github.com/containous/traefik @@ -77,7 +77,7 @@ go build ./cmd/traefik # run other commands like tests ``` -You will find the Træfik executable in the `~/go/src/github.com/containous/traefik` folder as `traefik`. +You will find the Traefik executable in the `~/go/src/github.com/containous/traefik` folder as `traefik`. ### Updating the templates diff --git a/README.md b/README.md index 021d8b4d5..965261222 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@

-Træfik +Traefik

[![Build Status SemaphoreCI](https://semaphoreci.com/api/v1/containous/traefik/branches/master/shields_badge.svg)](https://semaphoreci.com/containous/traefik) @@ -12,9 +12,9 @@ [![Twitter](https://img.shields.io/twitter/follow/traefik.svg?style=social)](https://twitter.com/intent/follow?screen_name=traefik) -Træfik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy. -Træfik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically. -Pointing Træfik at your orchestrator should be the _only_ configuration step you need. +Traefik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy. +Traefik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically. +Pointing Traefik at your orchestrator should be the _only_ configuration step you need. --- @@ -43,12 +43,12 @@ Now you want users to access these microservices, and you need a reverse proxy. Traditional reverse-proxies require that you configure _each_ route that will connect paths and subdomains to _each_ microservice. In an environment where you add, remove, kill, upgrade, or scale your services _many_ times a day, the task of keeping the routes up to date becomes tedious. 
-**This is when Træfik can help you!** +**This is when Traefik can help you!** -Træfik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention from your part. +Traefik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention from your part. -**Run Træfik and let it do the work for you!** -_(But if you'd rather configure some of your routes manually, Træfik supports that too!)_ +**Run Traefik and let it do the work for you!** +_(But if you'd rather configure some of your routes manually, Traefik supports that too!)_ ![Architecture](docs/img/architecture.png) @@ -85,15 +85,15 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t ## Quickstart -To get your hands on Træfik, you can use the [5-Minute Quickstart](http://docs.traefik.io/#the-trfik-quickstart-using-docker) in our documentation (you will need Docker). +To get your hands on Traefik, you can use the [5-Minute Quickstart](http://docs.traefik.io/#the-traefik-quickstart-using-docker) in our documentation (you will need Docker). -Alternatively, if you don't want to install anything on your computer, you can try Træfik online in this great [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers. +Alternatively, if you don't want to install anything on your computer, you can try Traefik online in this great [Katacoda tutorial](https://www.katacoda.com/courses/traefik/deploy-load-balancer) that shows how to load balance requests between multiple Docker containers. 
If you are looking for a more comprehensive and real use-case example, you can also check [Play-With-Docker](http://training.play-with-docker.com/traefik-load-balancing/) to see how to load balance between multiple nodes. ## Web UI -You can access the simple HTML frontend of Træfik. +You can access the simple HTML frontend of Traefik. ![Web UI Providers](docs/img/web.frontend.png) ![Web UI Health](docs/img/traefik-health.png) @@ -101,12 +101,12 @@ You can access the simple HTML frontend of Træfik. ## Documentation You can find the complete documentation at [https://docs.traefik.io](https://docs.traefik.io). -A collection of contributions around Træfik can be found at [https://awesome.traefik.io](https://awesome.traefik.io). +A collection of contributions around Traefik can be found at [https://awesome.traefik.io](https://awesome.traefik.io). ## Support To get community support, you can: -- join the Træfik community Slack channel: [![Join the chat at https://slack.traefik.io](https://img.shields.io/badge/style-register-green.svg?style=social&label=Slack)](https://slack.traefik.io) +- join the Traefik community Slack channel: [![Join the chat at https://slack.traefik.io](https://img.shields.io/badge/style-register-green.svg?style=social&label=Slack)](https://slack.traefik.io) - use [Stack Overflow](https://stackoverflow.com/questions/tagged/traefik) (using the `traefik` tag) If you need commercial support, please contact [Containo.us](https://containo.us) by mail: . @@ -134,12 +134,12 @@ git clone https://github.com/containous/traefik ## Introductory Videos Here is a talk given by [Emile Vauge](https://github.com/emilevauge) at [GopherCon 2017](https://gophercon.com/). -You will learn Træfik basics in less than 10 minutes. +You will learn Traefik basics in less than 10 minutes. 
[![Traefik GopherCon 2017](https://img.youtube.com/vi/RgudiksfL-k/0.jpg)](https://www.youtube.com/watch?v=RgudiksfL-k) Here is a talk given by [Ed Robinson](https://github.com/errm) at [ContainerCamp UK](https://container.camp) conference. -You will learn fundamental Træfik features and see some demos with Kubernetes. +You will learn fundamental Traefik features and see some demos with Kubernetes. [![Traefik ContainerCamp UK](https://img.youtube.com/vi/aFtpIShV60I/0.jpg)](https://www.youtube.com/watch?v=aFtpIShV60I) diff --git a/docs/basics.md b/docs/basics.md index 754a3bf30..9dd553076 100644 --- a/docs/basics.md +++ b/docs/basics.md @@ -14,12 +14,12 @@ Let's take our example from the [overview](/#overview) again: > ![Architecture](img/architecture.png) -Let's zoom on Træfik and have an overview of its internal architecture: +Let's zoom on Traefik and have an overview of its internal architecture: ![Architecture](img/internal.png) -- Incoming requests end on [entrypoints](#entrypoints), as the name suggests, they are the network entry points into Træfik (listening port, SSL, traffic redirection...). +- Incoming requests end on [entrypoints](#entrypoints), as the name suggests, they are the network entry points into Traefik (listening port, SSL, traffic redirection...). - Traffic is then forwarded to a matching [frontend](#frontends). A frontend defines routes from [entrypoints](#entrypoints) to [backends](#backends). Routes are created using requests fields (`Host`, `Path`, `Headers`...) and can match or not a request. - The [frontend](#frontends) will then send the request to a [backend](#backends). A backend can be composed by one or more [servers](#servers), and by a load-balancing strategy. @@ -27,7 +27,7 @@ Routes are created using requests fields (`Host`, `Path`, `Headers`...) and can ### Entrypoints -Entrypoints are the network entry points into Træfik. +Entrypoints are the network entry points into Traefik. They can be defined using: - a port (80, 443...) 
@@ -514,16 +514,16 @@ Additional http headers and hostname to health check request can be specified, f ## Configuration -Træfik's configuration has two parts: +Traefik's configuration has two parts: -- The [static Træfik configuration](/basics#static-trfik-configuration) which is loaded only at the beginning. -- The [dynamic Træfik configuration](/basics#dynamic-trfik-configuration) which can be hot-reloaded (no need to restart the process). +- The [static Traefik configuration](/basics#static-traefik-configuration) which is loaded only at the beginning. +- The [dynamic Traefik configuration](/basics#dynamic-traefik-configuration) which can be hot-reloaded (no need to restart the process). -### Static Træfik configuration +### Static Traefik configuration The static configuration is the global configuration which is setting up connections to configuration backends and entrypoints. -Træfik can be configured using many configuration sources with the following precedence order. +Traefik can be configured using many configuration sources with the following precedence order. Each item takes precedence over the item below it: - [Key-value store](/basics/#key-value-stores) @@ -539,7 +539,7 @@ It means that arguments override configuration file, and key-value store overrid #### Configuration file -By default, Træfik will try to find a `traefik.toml` in the following places: +By default, Traefik will try to find a `traefik.toml` in the following places: - `/etc/traefik/` - `$HOME/.traefik/` @@ -565,7 +565,7 @@ Note that all default values will be displayed as well. #### Key-value stores -Træfik supports several Key-value stores: +Traefik supports several Key-value stores: - [Consul](https://consul.io) - [etcd](https://coreos.com/etcd/) @@ -574,7 +574,7 @@ Træfik supports several Key-value stores: Please refer to the [User Guide Key-value store configuration](/user-guide/kv-config/) section to get documentation on it. 
-### Dynamic Træfik configuration +### Dynamic Traefik configuration The dynamic configuration concerns : @@ -583,9 +583,9 @@ The dynamic configuration concerns : - [Servers](/basics/#servers) - HTTPS Certificates -Træfik can hot-reload those rules which could be provided by [multiple configuration backends](/configuration/commons). +Traefik can hot-reload those rules which could be provided by [multiple configuration backends](/configuration/commons). -We only need to enable `watch` option to make Træfik watch configuration backend changes and generate its configuration automatically. +We only need to enable `watch` option to make Traefik watch configuration backend changes and generate its configuration automatically. Routes to services will be created and updated instantly at any changes. Please refer to the [configuration backends](/configuration/commons) section to get documentation on it. @@ -599,10 +599,10 @@ Usage: traefik [command] [--flag=flag_argument] ``` -List of Træfik available commands with description : +List of Traefik available commands with description : - `version` : Print version -- `storeconfig` : Store the static Traefik configuration into a Key-value stores. Please refer to the [Store Træfik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it. +- `storeconfig` : Store the static Traefik configuration into a Key-value stores. Please refer to the [Store Traefik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it. - `bug`: The easiest way to submit a pre-filled issue. - `healthcheck`: Calls Traefik `/ping` to check health. @@ -627,7 +627,7 @@ docker run traefik[:version] --help ### Command: bug -Here is the easiest way to submit a pre-filled issue on [Træfik GitHub](https://github.com/containous/traefik). +Here is the easiest way to submit a pre-filled issue on [Traefik GitHub](https://github.com/containous/traefik). 
```bash traefik bug @@ -660,14 +660,14 @@ You can read the public proposal on this topic [here](https://github.com/contain ### Why ? -In order to help us learn more about how Træfik is being used and improve it, we collect anonymous usage statistics from running instances. +In order to help us learn more about how Traefik is being used and improve it, we collect anonymous usage statistics from running instances. Those data help us prioritize our developments and focus on what's more important (for example, which configuration backend is used and which is not used). ### What ? -Once a day (the first call begins 10 minutes after the start of Træfik), we collect: +Once a day (the first call begins 10 minutes after the start of Traefik), we collect: -- the Træfik version +- the Traefik version - a hash of the configuration - an **anonymous version** of the static configuration: - token, user name, password, URL, IP, domain, email, etc, are removed diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index 91e21ff26..a0ede414d 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -279,7 +279,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification, | [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet | | [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet | | [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet | -| manual | - | none, but you need to run Træfik interactively, turn on `acmeLogging` to see instructions and press Enter. | YES | +| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press Enter. 
| YES | | [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES | | [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet | | [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet | @@ -299,7 +299,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification, ### `domains` You can provide SANs (alternative domains) to each main domain. -All domains must have A/AAAA records pointing to Træfik. +All domains must have A/AAAA records pointing to Traefik. Each domain & SAN will lead to a certificate request. ```toml @@ -341,7 +341,7 @@ Due to ACME limitation it is not possible to define wildcards in SANs (alternati Most likely the root domain should receive a certificate too, so it needs to be specified as SAN and 2 `DNS-01` challenges are executed. In this case the generated DNS TXT record for both domains is the same. Eventhough this behaviour is [DNS RFC](https://community.letsencrypt.org/t/wildcard-issuance-two-txt-records-for-the-same-name/54528/2) compliant, it can lead to problems as all DNS providers keep DNS records cached for a certain time (TTL) and this TTL can be superior to the challenge timeout making the `DNS-01` challenge fail. -The Træfik ACME client library [LEGO](https://github.com/xenolf/lego) supports some but not all DNS providers to work around this issue. +The Traefik ACME client library [LEGO](https://github.com/xenolf/lego) supports some but not all DNS providers to work around this issue. The [`provider` table](/configuration/acme/#provider) indicates if they allow generating certificates for a wildcard domain and its root domain. ### `onDemand` (Deprecated) @@ -421,7 +421,7 @@ docker run -v "/my/host/acme:/etc/traefik/acme" traefik ``` !!! 
warning - This file cannot be shared across multiple instances of Træfik at the same time. Please use a [KV Store entry](/configuration/acme/#as-a-key-value-store-entry) instead. + This file cannot be shared across multiple instances of Traefik at the same time. Please use a [KV Store entry](/configuration/acme/#as-a-key-value-store-entry) instead. #### As a Key Value Store Entry @@ -443,8 +443,8 @@ During migration from ACME v1 to ACME v2, using a storage file, a backup of the For example: if `acme.storage`'s value is `/etc/traefik/acme/acme.json`, the backup file will be `/etc/traefik/acme/acme.json.bak`. !!! note - When Træfik is launched in a container, the storage file's parent directory needs to be mounted to be able to access the backup file on the host. - Otherwise the backup file will be deleted when the container is stopped. Træfik will only generate it once! + When Traefik is launched in a container, the storage file's parent directory needs to be mounted to be able to access the backup file on the host. + Otherwise the backup file will be deleted when the container is stopped. Traefik will only generate it once! ### `dnsProvider` (Deprecated) @@ -465,4 +465,4 @@ If Let's Encrypt is not reachable, these certificates will be used: 1. Provided certificates !!! note - For new (sub)domains which need Let's Encrypt authentification, the default Træfik certificate will be used until Træfik is restarted. + For new (sub)domains which need Let's Encrypt authentification, the default Traefik certificate will be used until Traefik is restarted. diff --git a/docs/configuration/api.md b/docs/configuration/api.md index 215e2ce7c..62e1a9537 100644 --- a/docs/configuration/api.md +++ b/docs/configuration/api.md @@ -4,7 +4,7 @@ ```toml # API definition -# Warning: Enabling API will expose Træfik's configuration. +# Warning: Enabling API will expose Traefik's configuration. 
# It is not recommended in production, # unless secured by authentication and authorizations [api] @@ -61,7 +61,7 @@ keeping it restricted over internal networks | Path | Method | Description | |-----------------------------------------------------------------|------------------|-------------------------------------------| -| `/` | `GET` | Provides a simple HTML frontend of Træfik | +| `/` | `GET` | Provides a simple HTML frontend of Traefik | | `/cluster/leader` | `GET` | JSON leader true/false response | | `/health` | `GET` | JSON health metrics | | `/api` | `GET` | Configuration for all providers | @@ -268,11 +268,11 @@ curl -s "http://localhost:8080/health" | jq . ``` ```json { - // Træfik PID + // Traefik PID "pid": 2458, - // Træfik server uptime (formated time) + // Traefik server uptime (formated time) "uptime": "39m6.885931127s", - // Træfik server uptime in seconds + // Traefik server uptime in seconds "uptime_sec": 2346.885931127, // current server date "time": "2015-10-07 18:32:24.362238909 +0200 CEST", @@ -282,7 +282,7 @@ curl -s "http://localhost:8080/health" | jq . "status_code_count": { "502": 1 }, - // count HTTP response status code since Træfik started + // count HTTP response status code since Traefik started "total_status_code_count": { "200": 7, "404": 21, diff --git a/docs/configuration/backends/boltdb.md b/docs/configuration/backends/boltdb.md index cc9dfc1ce..8c4ee6f20 100644 --- a/docs/configuration/backends/boltdb.md +++ b/docs/configuration/backends/boltdb.md @@ -1,6 +1,6 @@ # BoltDB Provider -Træfik can be configured to use BoltDB as a provider. +Traefik can be configured to use BoltDB as a provider. 
```toml ################################################################ diff --git a/docs/configuration/backends/consul.md b/docs/configuration/backends/consul.md index 46ec56ea9..84ddaa188 100644 --- a/docs/configuration/backends/consul.md +++ b/docs/configuration/backends/consul.md @@ -1,6 +1,6 @@ # Consul Key-Value Provider -Træfik can be configured to use Consul as a provider. +Traefik can be configured to use Consul as a provider. ```toml ################################################################ diff --git a/docs/configuration/backends/consulcatalog.md b/docs/configuration/backends/consulcatalog.md index 82d693d14..d161eadfc 100644 --- a/docs/configuration/backends/consulcatalog.md +++ b/docs/configuration/backends/consulcatalog.md @@ -1,6 +1,6 @@ # Consul Catalog Provider -Træfik can be configured to use service discovery catalog of Consul as a provider. +Traefik can be configured to use service discovery catalog of Consul as a provider. ```toml ################################################################ @@ -96,7 +96,7 @@ Additional settings can be defined using Consul Catalog tags. | Label | Description | |----------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `.enable=false` | Disables this container in Træfik. | +| `.enable=false` | Disables this container in Traefik. | | `.protocol=https` | Overrides the default `http` protocol. | | `.weight=10` | Assigns this weight to the container. | | `traefik.backend.buffering.maxRequestBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. 
| @@ -214,7 +214,7 @@ If you need to support multiple frontends for a service, for example when having ### Examples -If you want that Træfik uses Consul tags correctly you need to defined them like that: +If you want that Traefik uses Consul tags correctly you need to defined them like that: ```js traefik.enable=true @@ -222,7 +222,7 @@ traefik.tags=api traefik.tags=external ``` -If the prefix defined in Træfik configuration is `bla`, tags need to be defined like that: +If the prefix defined in Traefik configuration is `bla`, tags need to be defined like that: ```js bla.enable=true diff --git a/docs/configuration/backends/docker.md b/docs/configuration/backends/docker.md index a4165f052..2ab4aae71 100644 --- a/docs/configuration/backends/docker.md +++ b/docs/configuration/backends/docker.md @@ -1,7 +1,7 @@ # Docker Provider -Træfik can be configured to use Docker as a provider. +Traefik can be configured to use Docker as a provider. ## Docker @@ -213,9 +213,9 @@ Labels can be used on containers to override default behavior. |---------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.docker.network` | Overrides the default docker network to use for connections to the container. [1] | | `traefik.domain` | Sets the default base domain for the frontend rules. For more information, check the [Container Labels section's of the user guide "Let's Encrypt & Docker"](/user-guide/docker-and-lets-encrypt/#container-labels) | -| `traefik.enable=false` | Disables this container in Træfik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. Useful when the container exposes multiples ports. 
| -| `traefik.tags=foo,bar,myTag` | Adds Træfik tags to the Docker container/service to be used in [constraints](/configuration/commons/#constraints). | +| `traefik.tags=foo,bar,myTag` | Adds Traefik tags to the Docker container/service to be used in [constraints](/configuration/commons/#constraints). | | `traefik.protocol=https` | Overrides the default `http` protocol | | `traefik.weight=10` | Assigns this weight to the container | | `traefik.backend=foo` | Gives the name `foo` to the generated backend for this container. | @@ -426,16 +426,16 @@ Segment labels override the default behavior. More details in this [example](/user-guide/docker-and-lets-encrypt/#labels). !!! warning - When running inside a container, Træfik will need network access through: + When running inside a container, Traefik will need network access through: `docker network connect ` ## usebindportip -The default behavior of Træfik is to route requests to the IP/Port of the matching container. -When setting `usebindportip` to true, you tell Træfik to use the IP/Port attached to the container's binding instead of the inner network IP/Port. +The default behavior of Traefik is to route requests to the IP/Port of the matching container. +When setting `usebindportip` to true, you tell Traefik to use the IP/Port attached to the container's binding instead of the inner network IP/Port. -When used in conjunction with the `traefik.port` label (that tells Træfik to route requests to a specific port), Træfik tries to find a binding with `traefik.port` port to select the container. If it can't find such a binding, Træfik falls back on the internal network IP of the container, but still uses the `traefik.port` that is set in the label. +When used in conjunction with the `traefik.port` label (that tells Traefik to route requests to a specific port), Traefik tries to find a binding with `traefik.port` port to select the container. 
If it can't find such a binding, Traefik falls back on the internal network IP of the container, but still uses the `traefik.port` that is set in the label. Below is a recap of the behavior of `usebindportip` in different situations. diff --git a/docs/configuration/backends/dynamodb.md b/docs/configuration/backends/dynamodb.md index 3eb09131f..37b15f624 100644 --- a/docs/configuration/backends/dynamodb.md +++ b/docs/configuration/backends/dynamodb.md @@ -1,6 +1,6 @@ # DynamoDB Provider -Træfik can be configured to use Amazon DynamoDB as a provider. +Traefik can be configured to use Amazon DynamoDB as a provider. ## Configuration diff --git a/docs/configuration/backends/ecs.md b/docs/configuration/backends/ecs.md index 81a2d7ba5..74effb56b 100644 --- a/docs/configuration/backends/ecs.md +++ b/docs/configuration/backends/ecs.md @@ -1,6 +1,6 @@ # ECS Provider -Træfik can be configured to use Amazon ECS as a provider. +Traefik can be configured to use Amazon ECS as a provider. ## Configuration @@ -106,7 +106,7 @@ To enable constraints see [provider-specific constraints section](/configuration ## Policy -Træfik needs the following policy to read ECS information: +Traefik needs the following policy to read ECS information: ```json { @@ -139,7 +139,7 @@ Labels can be used on task containers to override default behaviour: | Label | Description | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.domain` | Sets the default base domain for frontend rules. | -| `traefik.enable=false` | Disables this container in Træfik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Overrides the default `port` value. 
Overrides `NetworkBindings` from Docker Container | | `traefik.protocol=https` | Overrides the default `http` protocol | | `traefik.weight=10` | Assigns this weight to the container | diff --git a/docs/configuration/backends/etcd.md b/docs/configuration/backends/etcd.md index eaa09f3f2..def016a1c 100644 --- a/docs/configuration/backends/etcd.md +++ b/docs/configuration/backends/etcd.md @@ -1,6 +1,6 @@ # Etcd Provider -Træfik can be configured to use Etcd as a provider. +Traefik can be configured to use Etcd as a provider. ```toml ################################################################ diff --git a/docs/configuration/backends/eureka.md b/docs/configuration/backends/eureka.md index c5c330a15..0341cabc4 100644 --- a/docs/configuration/backends/eureka.md +++ b/docs/configuration/backends/eureka.md @@ -1,6 +1,6 @@ # Eureka Provider -Træfik can be configured to use Eureka as a provider. +Traefik can be configured to use Eureka as a provider. ```toml ################################################################ diff --git a/docs/configuration/backends/file.md b/docs/configuration/backends/file.md index f77aafb2d..50c0fe496 100644 --- a/docs/configuration/backends/file.md +++ b/docs/configuration/backends/file.md @@ -1,6 +1,6 @@ # File Provider -Træfik can be configured with a file. +Traefik can be configured with a file. ## Reference @@ -192,16 +192,16 @@ Træfik can be configured with a file. You have two choices: -- [Rules in Træfik configuration file](/configuration/backends/file/#rules-in-trfik-configuration-file) +- [Rules in Traefik configuration file](/configuration/backends/file/#rules-in-traefik-configuration-file) - [Rules in dedicated files](/configuration/backends/file/#rules-in-dedicated-files) -To enable the file backend, you must either pass the `--file` option to the Træfik binary or put the `[file]` section (with or without inner settings) in the configuration file. 
+To enable the file backend, you must either pass the `--file` option to the Traefik binary or put the `[file]` section (with or without inner settings) in the configuration file. -The configuration file allows managing both backends/frontends and HTTPS certificates (which are not [Let's Encrypt](https://letsencrypt.org) certificates generated through Træfik). +The configuration file allows managing both backends/frontends and HTTPS certificates (which are not [Let's Encrypt](https://letsencrypt.org) certificates generated through Traefik). -TOML templating can be used if rules are not defined in the Træfik configuration file. +TOML templating can be used if rules are not defined in the Traefik configuration file. -### Rules in Træfik Configuration File +### Rules in Traefik Configuration File Add your configuration at the end of the global configuration file `traefik.toml`: @@ -247,11 +247,11 @@ defaultEntryPoints = ["http", "https"] It's recommended to use the file provider to declare certificates. !!! warning - TOML templating cannot be used if rules are defined in the Træfik configuration file. + TOML templating cannot be used if rules are defined in the Traefik configuration file. ### Rules in Dedicated Files -Træfik allows defining rules in one or more separate files. +Traefik allows defining rules in one or more separate files. #### One Separate File @@ -272,7 +272,7 @@ defaultEntryPoints = ["http", "https"] watch = true ``` -The option `file.watch` allows Træfik to watch file changes automatically. +The option `file.watch` allows Traefik to watch file changes automatically. #### Multiple Separated Files @@ -284,7 +284,7 @@ You could have multiple `.toml` files in a directory (and recursively in its sub watch = true ``` -The option `file.watch` allows Træfik to watch file changes automatically. +The option `file.watch` allows Traefik to watch file changes automatically. 
#### Separate Files Content @@ -322,9 +322,9 @@ Backends, Frontends and TLS certificates are defined one at time, as described i !!! warning TOML templating can only be used **if rules are defined in one or more separate files**. - Templating will not work in the Træfik configuration file. + Templating will not work in the Traefik configuration file. -Træfik allows using TOML templating. +Traefik allows using TOML templating. Thus, it's possible to define easily lot of Backends, Frontends and TLS certificates as described in the file `template-rules.toml` : diff --git a/docs/configuration/backends/kubernetes.md b/docs/configuration/backends/kubernetes.md index 945168df9..962cc3225 100644 --- a/docs/configuration/backends/kubernetes.md +++ b/docs/configuration/backends/kubernetes.md @@ -1,6 +1,6 @@ # Kubernetes Ingress Provider -Træfik can be configured to use Kubernetes Ingress as a provider. +Traefik can be configured to use Kubernetes Ingress as a provider. See also [Kubernetes user guide](/user-guide/kubernetes). @@ -357,5 +357,5 @@ This ingress follows the [Global Default Backend](https://kubernetes.io/docs/con This will allow users to create a "default backend" that will match all unmatched requests. !!! note - Due to Træfik's use of priorities, you may have to set this ingress priority lower than other ingresses in your environment, to avoid this global ingress from satisfying requests that _could_ match other ingresses. + Due to Traefik's use of priorities, you may have to set this ingress priority lower than other ingresses in your environment, to avoid this global ingress from satisfying requests that _could_ match other ingresses. To do this, use the `traefik.ingress.kubernetes.io/priority` annotation (as seen in [General Annotations](/configuration/backends/kubernetes/#general-annotations)) on your ingresses accordingly. 
diff --git a/docs/configuration/backends/marathon.md b/docs/configuration/backends/marathon.md index 7241e41ec..4b7f41397 100644 --- a/docs/configuration/backends/marathon.md +++ b/docs/configuration/backends/marathon.md @@ -1,6 +1,6 @@ # Marathon Provider -Træfik can be configured to use Marathon as a provider. +Traefik can be configured to use Marathon as a provider. See also [Marathon user guide](/user-guide/marathon). @@ -196,7 +196,7 @@ The following labels can be defined on Marathon applications. They adjust the be | Label | Description | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.domain` | Sets the default base domain used for the frontend rules. | -| `traefik.enable=false` | Disables this container in Træfik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. Useful when the container exposes multiples ports. | | `traefik.portIndex=1` | Registers port by index in the application's ports array. Useful when the application exposes multiple ports. | | `traefik.protocol=https` | Overrides the default `http` protocol. | diff --git a/docs/configuration/backends/mesos.md b/docs/configuration/backends/mesos.md index 6e701bf62..2c2628390 100644 --- a/docs/configuration/backends/mesos.md +++ b/docs/configuration/backends/mesos.md @@ -1,6 +1,6 @@ # Mesos Generic Provider -Træfik can be configured to use Mesos as a provider. +Traefik can be configured to use Mesos as a provider. ```toml ################################################################ @@ -109,7 +109,7 @@ The following labels can be defined on Mesos tasks. 
They adjust the behavior for | Label | Description | |---------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.domain` | Sets the default base domain for the frontend rules. | -| `traefik.enable=false` | Disables this container in Træfik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. Useful when the application exposes multiple ports. | | `traefik.portName=web` | Registers port by name in the application's ports array. Useful when the application exposes multiple ports. | | `traefik.portIndex=1` | Registers port by index in the application's ports array. Useful when the application exposes multiple ports. | diff --git a/docs/configuration/backends/rancher.md b/docs/configuration/backends/rancher.md index bb84fe8ee..f91c4d9b9 100644 --- a/docs/configuration/backends/rancher.md +++ b/docs/configuration/backends/rancher.md @@ -1,6 +1,6 @@ # Rancher Provider -Træfik can be configured to use Rancher as a provider. +Traefik can be configured to use Rancher as a provider. ## Global Configuration @@ -141,7 +141,7 @@ Labels can be used on task containers to override default behavior: | Label | Description | |---------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.domain` | Sets the default base domain for the frontend rules. | -| `traefik.enable=false` | Disables this container in Træfik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. 
Useful when the container exposes multiple ports. | | `traefik.protocol=https` | Overrides the default `http` protocol. | | `traefik.weight=10` | Assigns this weight to the container. | diff --git a/docs/configuration/backends/rest.md b/docs/configuration/backends/rest.md index fba802c8f..b83027f0d 100644 --- a/docs/configuration/backends/rest.md +++ b/docs/configuration/backends/rest.md @@ -1,6 +1,6 @@ # Rest Provider -Træfik can be configured: +Traefik can be configured: - using a RESTful api. diff --git a/docs/configuration/backends/servicefabric.md b/docs/configuration/backends/servicefabric.md index 04609d61c..62944e6d3 100644 --- a/docs/configuration/backends/servicefabric.md +++ b/docs/configuration/backends/servicefabric.md @@ -1,6 +1,6 @@ # Azure Service Fabric Provider -Træfik can be configured to use Azure Service Fabric as a provider. +Traefik can be configured to use Azure Service Fabric as a provider. See [this repository for an example deployment package and further documentation.](https://aka.ms/traefikonsf) @@ -47,13 +47,13 @@ refreshSeconds = 10 ## Labels -The provider uses labels to configure how services are exposed through Træfik. +The provider uses labels to configure how services are exposed through Traefik. These can be set using Extensions and the Property Manager API #### Extensions Set labels with extensions through the services `ServiceManifest.xml` file. 
-Here is an example of an extension setting Træfik labels: +Here is an example of an extension setting Traefik labels: ```xml @@ -96,9 +96,9 @@ Labels, set through extensions or the property manager, can be used on services | Label | Description | |------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `traefik.enable=false` | Disable this container in Træfik | +| `traefik.enable=false` | Disable this container in Traefik | | `traefik.backend.circuitbreaker.expression=EXPR` | Create a [circuit breaker](/basics/#backends) to be used against the backend | -| `traefik.servicefabric.groupname` | Group all services with the same name into a single backend in Træfik | +| `traefik.servicefabric.groupname` | Group all services with the same name into a single backend in Traefik | | `traefik.servicefabric.groupweight` | Set the weighting of the current services nodes in the backend group | | `traefik.servicefabric.enablelabeloverrides` | Toggle whether labels can be overridden using the Service Fabric Property Manager API | | `traefik.backend.healthcheck.path=/health` | Enable health check for the backend, hitting the container at `path`. | diff --git a/docs/configuration/backends/web.md b/docs/configuration/backends/web.md index 489fa9363..f592fc730 100644 --- a/docs/configuration/backends/web.md +++ b/docs/configuration/backends/web.md @@ -3,7 +3,7 @@ !!! danger "DEPRECATED" The web provider is deprecated, please use the [api](/configuration/api.md), the [ping](/configuration/ping.md), the [metrics](/configuration/metrics) and the [rest](/configuration/backends/rest.md) provider. -Træfik can be configured: +Traefik can be configured: - using a RESTful api. - to use a monitoring system (like Prometheus, DataDog or StatD, ...). 
@@ -97,7 +97,7 @@ usersFile = "/path/to/.htdigest" ## Metrics -You can enable Træfik to export internal metrics to different monitoring systems. +You can enable Traefik to export internal metrics to different monitoring systems. ### Prometheus @@ -239,8 +239,8 @@ recentErrors = 10 | Path | Method | Description | |-----------------------------------------------------------------|:-------------:|----------------------------------------------------------------------------------------------------| -| `/` | `GET` | Provides a simple HTML frontend of Træfik | -| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Træfik process liveness. Return a code `200` with the content: `OK` | +| `/` | `GET` | Provides a simple HTML frontend of Traefik | +| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Traefik process liveness. Return a code `200` with the content: `OK` | | `/health` | `GET` | JSON health metrics | | `/api` | `GET` | Configuration for all providers | | `/api/providers` | `GET` | Providers | @@ -286,11 +286,11 @@ curl -s "http://localhost:8080/health" | jq . ``` ```json { - // Træfik PID + // Traefik PID "pid": 2458, - // Træfik server uptime (formated time) + // Traefik server uptime (formated time) "uptime": "39m6.885931127s", - // Træfik server uptime in seconds + // Traefik server uptime in seconds "uptime_sec": 2346.885931127, // current server date "time": "2015-10-07 18:32:24.362238909 +0200 CEST", @@ -300,7 +300,7 @@ curl -s "http://localhost:8080/health" | jq . 
"status_code_count": { "502": 1 }, - // count HTTP response status code since Træfik started + // count HTTP response status code since Traefik started "total_status_code_count": { "200": 7, "404": 21, diff --git a/docs/configuration/backends/zookeeper.md b/docs/configuration/backends/zookeeper.md index 2ef55b3af..34aa4e2f4 100644 --- a/docs/configuration/backends/zookeeper.md +++ b/docs/configuration/backends/zookeeper.md @@ -1,6 +1,6 @@ # Zookeeper Provider -Træfik can be configured to use Zookeeper as a provider. +Traefik can be configured to use Zookeeper as a provider. ```toml ################################################################ diff --git a/docs/configuration/commons.md b/docs/configuration/commons.md index 4f59d698f..6ff2dcb3a 100644 --- a/docs/configuration/commons.md +++ b/docs/configuration/commons.md @@ -132,9 +132,9 @@ Each frontend can specify its own entrypoints. ## Constraints -In a micro-service architecture, with a central service discovery, setting constraints limits Træfik scope to a smaller number of routes. +In a micro-service architecture, with a central service discovery, setting constraints limits Traefik scope to a smaller number of routes. -Træfik filters services according to service attributes/tags set in your providers. +Traefik filters services according to service attributes/tags set in your providers. Supported filters: @@ -255,7 +255,7 @@ These can "burst" up to 10 and 200 in each period respectively. ## Buffering In some cases request/buffering can be enabled for a specific backend. -By enabling this, Træfik will read the entire request into memory (possibly buffering large requests into disk) and will reject requests that are over a specified limit. +By enabling this, Traefik will read the entire request into memory (possibly buffering large requests into disk) and will reject requests that are over a specified limit. 
This may help services deal with large data (multipart/form-data for example) more efficiently and should minimise time spent when sending data to a backend server. For more information please check [oxy/buffer](http://godoc.org/github.com/vulcand/oxy/buffer) documentation. diff --git a/docs/configuration/entrypoints.md b/docs/configuration/entrypoints.md index 1aa81b454..006731872 100644 --- a/docs/configuration/entrypoints.md +++ b/docs/configuration/entrypoints.md @@ -235,8 +235,8 @@ If you need to add or remove TLS certificates while Traefik is started, Dynamic ## TLS Mutual Authentication TLS Mutual Authentication can be `optional` or not. -If it's `optional`, Træfik will authorize connection with certificates not signed by a specified Certificate Authority (CA). -Otherwise, Træfik will only accept clients that present a certificate signed by a specified Certificate Authority (CA). +If it's `optional`, Traefik will authorize connection with certificates not signed by a specified Certificate Authority (CA). +Otherwise, Traefik will only accept clients that present a certificate signed by a specified Certificate Authority (CA). `ClientCAFiles` can be configured with multiple `CA:s` in the same file or use multiple files containing one or several `CA:s`. The `CA:s` has to be in PEM format. @@ -486,7 +486,7 @@ To enable [ProxyProtocol](https://www.haproxy.org/download/1.8/doc/proxy-protoco Only IPs in `trustedIPs` will lead to remote client address replacement: you should declare your load-balancer IP or CIDR range here (in testing environment, you can trust everyone using `insecure = true`). !!! danger - When queuing Træfik behind another load-balancer, be sure to carefully configure Proxy Protocol on both sides. + When queuing Traefik behind another load-balancer, be sure to carefully configure Proxy Protocol on both sides. Otherwise, it could introduce a security risk in your system by forging requests. 
```toml diff --git a/docs/configuration/logs.md b/docs/configuration/logs.md index 4b9d4a9f2..fa88f6526 100644 --- a/docs/configuration/logs.md +++ b/docs/configuration/logs.md @@ -278,7 +278,7 @@ accessLogsFile = "log/access.log" ### CLF - Common Log Format -By default, Træfik use the CLF (`common`) as access log format. +By default, Traefik use the CLF (`common`) as access log format. ```html - [] " " "" "" "" "" ms diff --git a/docs/configuration/ping.md b/docs/configuration/ping.md index e6b99bfe1..8c523f4c0 100644 --- a/docs/configuration/ping.md +++ b/docs/configuration/ping.md @@ -15,7 +15,7 @@ | Path | Method | Description | |---------|---------------|----------------------------------------------------------------------------------------------------| -| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Træfik process liveness. Return a code `200` with the content: `OK` | +| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Traefik process liveness. Return a code `200` with the content: `OK` | !!! warning diff --git a/docs/configuration/tracing.md b/docs/configuration/tracing.md index 275a39073..f8eaf2d9f 100644 --- a/docs/configuration/tracing.md +++ b/docs/configuration/tracing.md @@ -4,7 +4,7 @@ The tracing system allows developers to visualize call flows in their infrastruc We use [OpenTracing](http://opentracing.io). It is an open standard designed for distributed tracing. -Træfik supports three tracing backends: Jaeger, Zipkin and DataDog. +Traefik supports three tracing backends: Jaeger, Zipkin and DataDog. ## Jaeger @@ -61,7 +61,7 @@ Træfik supports three tracing backends: Jaeger, Zipkin and DataDog. ``` !!! warning - Træfik is only able to send data over compact thrift protocol to the [Jaeger agent](https://www.jaegertracing.io/docs/deployment/#agent). + Traefik is only able to send data over compact thrift protocol to the [Jaeger agent](https://www.jaegertracing.io/docs/deployment/#agent). 
## Zipkin diff --git a/docs/index.md b/docs/index.md index a24a20dd2..bcbcc4489 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,5 +1,5 @@

-Træfik +Traefik

[![Build Status SemaphoreCI](https://semaphoreci.com/api/v1/containous/traefik/branches/master/shields_badge.svg)](https://semaphoreci.com/containous/traefik) @@ -10,9 +10,9 @@ [![Twitter](https://img.shields.io/twitter/follow/traefik.svg?style=social)](https://twitter.com/intent/follow?screen_name=traefik) -Træfik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy. -Træfik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically. -Pointing Træfik at your orchestrator should be the _only_ configuration step you need. +Traefik is a modern HTTP reverse proxy and load balancer that makes deploying microservices easy. +Traefik integrates with your existing infrastructure components ([Docker](https://www.docker.com/), [Swarm mode](https://docs.docker.com/engine/swarm/), [Kubernetes](https://kubernetes.io), [Marathon](https://mesosphere.github.io/marathon/), [Consul](https://www.consul.io/), [Etcd](https://coreos.com/etcd/), [Rancher](https://rancher.com), [Amazon ECS](https://aws.amazon.com/ecs), ...) and configures itself automatically and dynamically. +Pointing Traefik at your orchestrator should be the _only_ configuration step you need. ## Overview @@ -22,12 +22,12 @@ Now you want users to access these microservices, and you need a reverse proxy. Traditional reverse-proxies require that you configure _each_ route that will connect paths and subdomains to _each_ microservice. In an environment where you add, remove, kill, upgrade, or scale your services _many_ times a day, the task of keeping the routes up to date becomes tedious. 
-**This is when Træfik can help you!** +**This is when Traefik can help you!** -Træfik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention from your part. +Traefik listens to your service registry/orchestrator API and instantly generates the routes so your microservices are connected to the outside world -- without further intervention on your part. -**Run Træfik and let it do the work for you!** -_(But if you'd rather configure some of your routes manually, Træfik supports that too!)_ +**Run Traefik and let it do the work for you!** +_(But if you'd rather configure some of your routes manually, Traefik supports that too!)_ ![Architecture](img/architecture.png) @@ -62,15 +62,15 @@ _(But if you'd rather configure some of your routes manually, Træfik supports t - [File](/configuration/backends/file/) - [Rest](/configuration/backends/rest/) -## The Træfik Quickstart (Using Docker) +## The Traefik Quickstart (Using Docker) In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure. -To save some time, you can clone [Træfik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory. 
-### 1 — Launch Træfik — Tell It to Listen to Docker +### 1 — Launch Traefik — Tell It to Listen to Docker -Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Træfik image: +Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Traefik image: ```yaml version: '3' @@ -78,7 +78,7 @@ version: '3' services: reverse-proxy: image: traefik # The official Traefik docker image - command: --api --docker # Enables the web UI and tells Træfik to listen to docker + command: --api --docker # Enables the web UI and tells Traefik to listen to docker ports: - "80:80" # The HTTP port - "8080:8080" # The Web UI (enabled by --api) @@ -90,7 +90,7 @@ services: Enabling the Web UI with the `--api` flag might expose configuration elements. You can read more about this on the [API/Dashboard's Security section](/configuration/api#security). -**That's it. Now you can launch Træfik!** +**That's it. Now you can launch Traefik!** Start your `reverse-proxy` with the following command: @@ -98,11 +98,11 @@ Start your `reverse-proxy` with the following command: docker-compose up -d reverse-proxy ``` -You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Træfik's dashboard (we'll go back there once we have launched a service in step 2). +You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Traefik's dashboard (we'll go back there once we have launched a service in step 2). -### 2 — Launch a Service — Træfik Detects It and Creates a Route for You +### 2 — Launch a Service — Traefik Detects It and Creates a Route for You -Now that we have a Træfik instance up and running, we will deploy new services. +Now that we have a Traefik instance up and running, we will deploy new services. Edit your `docker-compose.yml` file and add the following at the end of your file. 
@@ -122,7 +122,7 @@ Start the `whoami` service with the following command: docker-compose up -d whoami ``` -Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new container and updated its own configuration. +Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Traefik has automatically detected the new container and updated its own configuration. When Traefik detects new services, it creates the corresponding routes so you can call them ... _let's see!_ (Here, we're using curl) @@ -145,9 +145,9 @@ Run more instances of your `whoami` service with the following command: docker-compose scale whoami=2 ``` -Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new instance of the container. +Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Traefik has automatically detected the new instance of the container. -Finally, see that Træfik load-balances between the two instances of your services by running twice the following command: +Finally, see that Traefik load-balances between the two instances of your services by running twice the following command: ```shell curl -H Host:whoami.docker.localhost http://127.0.0.1 @@ -167,22 +167,22 @@ IP: 172.27.0.4 # ... ``` -### 4 — Enjoy Træfik's Magic +### 4 — Enjoy Traefik's Magic -Now that you have a basic understanding of how Træfik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](/) and let Træfik work for you! -Whatever your infrastructure is, there is probably [an available Træfik provider](/#supported-providers) that will do the job. 
+Now that you have a basic understanding of how Traefik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](/) and let Traefik work for you! +Whatever your infrastructure is, there is probably [an available Traefik provider](/#supported-providers) that will do the job. -Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Træfik's let's encrypt integration](/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](/user-guide/docker-and-lets-encrypt/). +Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Traefik's let's encrypt integration](/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](/user-guide/docker-and-lets-encrypt/). ## Resources Here is a talk given by [Emile Vauge](https://github.com/emilevauge) at [GopherCon 2017](https://gophercon.com). -You will learn Træfik basics in less than 10 minutes. +You will learn Traefik basics in less than 10 minutes. [![Traefik GopherCon 2017](https://img.youtube.com/vi/RgudiksfL-k/0.jpg)](https://www.youtube.com/watch?v=RgudiksfL-k) Here is a talk given by [Ed Robinson](https://github.com/errm) at [ContainerCamp UK](https://container.camp) conference. -You will learn fundamental Træfik features and see some demos with Kubernetes. +You will learn fundamental Traefik features and see some demos with Kubernetes. [![Traefik ContainerCamp UK](https://img.youtube.com/vi/aFtpIShV60I/0.jpg)](https://www.youtube.com/watch?v=aFtpIShV60I) @@ -217,5 +217,5 @@ Reported vulnerabilities can be found on ### Report a Vulnerability -We want to keep Træfik safe for everyone. -If you've discovered a security vulnerability in Træfik, we appreciate your help in disclosing it to us in a responsible manner, using [this form](https://security.traefik.io). +We want to keep Traefik safe for everyone. 
+If you've discovered a security vulnerability in Traefik, we appreciate your help in disclosing it to us in a responsible manner, using [this form](https://security.traefik.io). diff --git a/docs/user-guide/cluster-docker-consul.md b/docs/user-guide/cluster-docker-consul.md index d9e8417bb..bdd6d73d1 100644 --- a/docs/user-guide/cluster-docker-consul.md +++ b/docs/user-guide/cluster-docker-consul.md @@ -1,15 +1,15 @@ # Clustering / High Availability on Docker Swarm with Consul -This guide explains how to use Træfik in high availability mode in a Docker Swarm and with Let's Encrypt. +This guide explains how to use Traefik in high availability mode in a Docker Swarm and with Let's Encrypt. -Why do we need Træfik in cluster mode? Running multiple instances should work out of the box? +Why do we need Traefik in cluster mode? Running multiple instances should work out of the box? -If you want to use Let's Encrypt with Træfik, sharing configuration or TLS certificates between many Træfik instances, you need Træfik cluster/HA. +If you want to use Let's Encrypt with Traefik, sharing configuration or TLS certificates between many Traefik instances, you need Traefik cluster/HA. Ok, could we mount a shared volume used by all my instances? Yes, you can, but it will not work. When you use Let's Encrypt, you need to store certificates, but not only. -When Træfik generates a new certificate, it configures a challenge and once Let's Encrypt will verify the ownership of the domain, it will ping back the challenge. -If the challenge is not known by other Træfik instances, the validation will fail. +When Traefik generates a new certificate, it configures a challenge and once Let's Encrypt will verify the ownership of the domain, it will ping back the challenge. +If the challenge is not known by other Traefik instances, the validation will fail. 
For more information about the challenge: [Automatic Certificate Management Environment (ACME)](https://github.com/ietf-wg-acme/acme/blob/master/draft-ietf-acme-acme.md#http-challenge) @@ -17,12 +17,12 @@ For more information about the challenge: [Automatic Certificate Management Envi You will need a working Docker Swarm cluster. -## Træfik configuration +## Traefik configuration In this guide, we will not use a TOML configuration file, but only command line flag. With that, we can use the base image without mounting configuration file or building custom image. -What Træfik should do: +What Traefik should do: - Listen to 80 and 443 - Redirect HTTP traffic to HTTPS @@ -64,7 +64,7 @@ Let's Encrypt needs 4 parameters: an TLS entry point to listen to, a non-TLS ent To enable Let's Encrypt support, you need to add `--acme` flag. -Now, Træfik needs to know where to store the certificates, we can choose between a key in a Key-Value store, or a file path: `--acme.storage=my/key` or `--acme.storage=/path/to/acme.json`. +Now, Traefik needs to know where to store the certificates, we can choose between a key in a Key-Value store, or a file path: `--acme.storage=my/key` or `--acme.storage=/path/to/acme.json`. The `acme.httpChallenge.entryPoint` flag enables the `HTTP-01` challenge and specifies the entryPoint to use during the challenges. @@ -143,9 +143,9 @@ networks: ## Migrate configuration to Consul -We created a special Træfik command to help configuring your Key Value store from a Træfik TOML configuration file and/or CLI flags. +We created a special Traefik command to help configuring your Key Value store from a Traefik TOML configuration file and/or CLI flags. -## Deploy a Træfik cluster +## Deploy a Traefik cluster The best way we found is to have an initializer service. This service will push the config to Consul via the `storeconfig` sub-command. 
@@ -173,7 +173,7 @@ The initializer in a docker-compose file will be: - consul ``` -And now, the Træfik part will only have the Consul configuration. +And now, the Traefik part will only have the Consul configuration. ```yaml traefik: @@ -189,10 +189,10 @@ And now, the Træfik part will only have the Consul configuration. ``` !!! note - For Træfik <1.5.0 add `acme.storage=traefik/acme/account` because Træfik is not reading it from Consul. + For Traefik <1.5.0 add `acme.storage=traefik/acme/account` because Traefik is not reading it from Consul. If you have some update to do, update the initializer service and re-deploy it. -The new configuration will be stored in Consul, and you need to restart the Træfik node: `docker service update --force traefik_traefik`. +The new configuration will be stored in Consul, and you need to restart the Traefik node: `docker service update --force traefik_traefik`. ## Full docker-compose file diff --git a/docs/user-guide/cluster.md b/docs/user-guide/cluster.md index 8c03d2798..b08d2b937 100644 --- a/docs/user-guide/cluster.md +++ b/docs/user-guide/cluster.md @@ -1,8 +1,8 @@ # Clustering / High Availability (beta) -This guide explains how to use Træfik in high availability mode. +This guide explains how to use Traefik in high availability mode. -In order to deploy and configure multiple Træfik instances, without copying the same configuration file on each instance, we will use a distributed Key-Value store. +In order to deploy and configure multiple Traefik instances, without copying the same configuration file on each instance, we will use a distributed Key-Value store. ## Prerequisites @@ -11,23 +11,23 @@ _(Currently, we recommend [Consul](https://consul.io) .)_ ## File configuration to KV store migration -We created a special Træfik command to help configuring your Key Value store from a Træfik TOML configuration file. 
+We created a special Traefik command to help configuring your Key Value store from a Traefik TOML configuration file. Please refer to [this section](/user-guide/kv-config/#store-configuration-in-key-value-store) to get more details. -## Deploy a Træfik cluster +## Deploy a Traefik cluster -Once your Træfik configuration is uploaded on your KV store, you can start each Træfik instance. +Once your Traefik configuration is uploaded on your KV store, you can start each Traefik instance. -A Træfik cluster is based on a manager/worker model. +A Traefik cluster is based on a manager/worker model. -When starting, Træfik will elect a manager. +When starting, Traefik will elect a manager. If this instance fails, another manager will be automatically elected. -## Træfik cluster and Let's Encrypt +## Traefik cluster and Let's Encrypt **In cluster mode, ACME certificates have to be stored in [a KV Store entry](/configuration/acme/#as-a-key-value-store-entry).** -Thanks to the Træfik cluster mode algorithm (based on [the Raft Consensus Algorithm](https://raft.github.io/)), only one instance will contact Let's encrypt to solve the challenges. +Thanks to the Traefik cluster mode algorithm (based on [the Raft Consensus Algorithm](https://raft.github.io/)), only one instance will contact Let's encrypt to solve the challenges. The others instances will get ACME certificate from the KV Store entry. diff --git a/docs/user-guide/docker-and-lets-encrypt.md b/docs/user-guide/docker-and-lets-encrypt.md index 9c3c95b49..daf94d907 100644 --- a/docs/user-guide/docker-and-lets-encrypt.md +++ b/docs/user-guide/docker-and-lets-encrypt.md @@ -1,8 +1,8 @@ # Let's Encrypt & Docker -In this use case, we want to use Træfik as a _layer-7_ load balancer with SSL termination for a set of micro-services used to run a web application. +In this use case, we want to use Traefik as a _layer-7_ load balancer with SSL termination for a set of micro-services used to run a web application. 
-We also want to automatically _discover any services_ on the Docker host and let Træfik reconfigure itself automatically when containers get created (or shut down) so HTTP traffic can be routed accordingly. +We also want to automatically _discover any services_ on the Docker host and let Traefik reconfigure itself automatically when containers get created (or shut down) so HTTP traffic can be routed accordingly. In addition, we want to use Let's Encrypt to automatically generate and renew SSL certificates per hostname. @@ -19,7 +19,7 @@ In real-life, you'll want to use your own domain and have the DNS configured acc Docker containers can only communicate with each other over TCP when they share at least one network. This makes sense from a topological point of view in the context of networking, since Docker under the hood creates IPTable rules so containers can't reach other containers _unless you'd want to_. -In this example, we're going to use a single network called `web` where all containers that are handling HTTP traffic (including Træfik) will reside in. +In this example, we're going to use a single network called `web` where all containers that are handling HTTP traffic (including Traefik) will reside in. On the Docker host, run the following command: @@ -27,7 +27,7 @@ On the Docker host, run the following command: docker network create web ``` -Now, let's create a directory on the server where we will configure the rest of Træfik: +Now, let's create a directory on the server where we will configure the rest of Traefik: ```shell mkdir -p /opt/traefik @@ -41,7 +41,7 @@ touch /opt/traefik/acme.json && chmod 600 /opt/traefik/acme.json touch /opt/traefik/traefik.toml ``` -The `docker-compose.yml` file will provide us with a simple, consistent and more importantly, a deterministic way to create Træfik. +The `docker-compose.yml` file will provide us with a simple, consistent and more importantly, a deterministic way to create Traefik. 
The contents of the file is as follows: @@ -69,12 +69,12 @@ networks: ``` As you can see, we're mounting the `traefik.toml` file as well as the (empty) `acme.json` file in the container. -Also, we're mounting the `/var/run/docker.sock` Docker socket in the container as well, so Træfik can listen to Docker events and reconfigure its own internal configuration when containers are created (or shut down). +Also, we're mounting the `/var/run/docker.sock` Docker socket in the container as well, so Traefik can listen to Docker events and reconfigure its own internal configuration when containers are created (or shut down). Also, we're making sure the container is automatically restarted by the Docker engine in case of problems (or: if the server is rebooted). We're publishing the default HTTP ports `80` and `443` on the host, and making sure the container is placed within the `web` network we've created earlier on. Finally, we're giving this container a static name called `traefik`. -Let's take a look at a simple `traefik.toml` configuration as well before we'll create the Træfik container: +Let's take a look at a simple `traefik.toml` configuration as well before we'll create the Traefik container: ```toml debug = false @@ -111,17 +111,17 @@ entryPoint = "http" This is the minimum configuration required to do the following: - Log `ERROR`-level messages (or more severe) to the console, but silence `DEBUG`-level messages -- Check for new versions of Træfik periodically +- Check for new versions of Traefik periodically - Create two entry points, namely an `HTTP` endpoint on port `80`, and an `HTTPS` endpoint on port `443` where all incoming traffic on port `80` will immediately get redirected to `HTTPS`. -- Enable the Docker provider and listen for container events on the Docker unix socket we've mounted earlier. 
However, **new containers will not be exposed by Træfik by default, we'll get into this in a bit!** +- Enable the Docker provider and listen for container events on the Docker unix socket we've mounted earlier. However, **new containers will not be exposed by Traefik by default, we'll get into this in a bit!** - Enable automatic request and configuration of SSL certificates using Let's Encrypt. These certificates will be stored in the `acme.json` file, which you can back-up yourself and store off-premises. -Alright, let's boot the container. From the `/opt/traefik` directory, run `docker-compose up -d` which will create and start the Træfik container. +Alright, let's boot the container. From the `/opt/traefik` directory, run `docker-compose up -d` which will create and start the Traefik container. ## Exposing Web Services to the Outside World -Now that we've fully configured and started Træfik, it's time to get our applications running! +Now that we've fully configured and started Traefik, it's time to get our applications running! Let's take a simple example of a micro-service project consisting of various services, where some will be exposed to the outside world and some will not. @@ -195,10 +195,10 @@ Since the `traefik` container we've created and started earlier is also attached ### Labels -As mentioned earlier, we don't want containers exposed automatically by Træfik. +As mentioned earlier, we don't want containers exposed automatically by Traefik. The reason behind this is simple: we want to have control over this process ourselves. -Thanks to Docker labels, we can tell Træfik how to create its internal routing configuration. +Thanks to Docker labels, we can tell Traefik how to create its internal routing configuration. Let's take a look at the labels themselves for the `app` service, which is a HTTP webservice listing on port 9000: @@ -219,13 +219,13 @@ We use both `container labels` and `service labels`. 
First, we specify the `backend` name which corresponds to the actual service we're routing **to**. -We also tell Træfik to use the `web` network to route HTTP traffic to this container. -With the `traefik.enable` label, we tell Træfik to include this container in its internal configuration. +We also tell Traefik to use the `web` network to route HTTP traffic to this container. +With the `traefik.enable` label, we tell Traefik to include this container in its internal configuration. -With the `frontend.rule` label, we tell Træfik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`. +With the `frontend.rule` label, we tell Traefik that we want to route to this container if the incoming HTTP request contains the `Host` `app.my-awesome-app.org`. Essentially, this is the actual rule used for Layer-7 load balancing. -Finally but not unimportantly, we tell Træfik to route **to** port `9000`, since that is the actual TCP/IP port the container actually listens on. +Finally but not unimportantly, we tell Traefik to route **to** port `9000`, since that is the actual TCP/IP port the container actually listens on. ### Service labels @@ -238,25 +238,25 @@ In the example, two service names are defined : `basic` and `admin`. They allow creating two frontends and two backends. - `basic` has only one `service label` : `traefik.basic.protocol`. -Træfik will use values set in `traefik.frontend.rule` and `traefik.port` to create the `basic` frontend and backend. +Traefik will use values set in `traefik.frontend.rule` and `traefik.port` to create the `basic` frontend and backend. The frontend listens to incoming HTTP requests which contain the `Host` `app.my-awesome-app.org` and redirect them in `HTTP` to the port `9000` of the backend. - `admin` has all the `services labels` needed to create the `admin` frontend and backend (`traefik.admin.frontend.rule`, `traefik.admin.protocol`, `traefik.admin.port`). 
-Træfik will create a frontend to listen to incoming HTTP requests which contain the `Host` `admin-app.my-awesome-app.org` and redirect them in `HTTPS` to the port `9443` of the backend. +Traefik will create a frontend to listen to incoming HTTP requests which contain the `Host` `admin-app.my-awesome-app.org` and redirect them in `HTTPS` to the port `9443` of the backend. #### Gotchas and tips - Always specify the correct port where the container expects HTTP traffic using `traefik.port` label. - If a container exposes multiple ports, Træfik may forward traffic to the wrong port. + If a container exposes multiple ports, Traefik may forward traffic to the wrong port. Even if a container only exposes one port, you should always write configuration defensively and explicitly. -- Should you choose to enable the `exposedByDefault` flag in the `traefik.toml` configuration, be aware that all containers that are placed in the same network as Træfik will automatically be reachable from the outside world, for everyone and everyone to see. +- Should you choose to enable the `exposedByDefault` flag in the `traefik.toml` configuration, be aware that all containers that are placed in the same network as Traefik will automatically be reachable from the outside world, for everyone and everyone to see. Usually, this is a bad idea. -- With the `traefik.frontend.auth.basic` label, it's possible for Træfik to provide a HTTP basic-auth challenge for the endpoints you provide the label for. -- Træfik has built-in support to automatically export [Prometheus](https://prometheus.io) metrics -- Træfik supports websockets out of the box. In the example above, the `events`-service could be a NodeJS-based application which allows clients to connect using websocket protocol. +- With the `traefik.frontend.auth.basic` label, it's possible for Traefik to provide a HTTP basic-auth challenge for the endpoints you provide the label for. 
+- Traefik has built-in support to automatically export [Prometheus](https://prometheus.io) metrics +- Traefik supports websockets out of the box. In the example above, the `events`-service could be a NodeJS-based application which allows clients to connect using websocket protocol. Thanks to the fact that HTTPS in our example is enforced, these websockets are automatically secure as well (WSS) ### Final thoughts -Using Træfik as a Layer-7 load balancer in combination with both Docker and Let's Encrypt provides you with an extremely flexible, powerful and self-configuring solution for your projects. +Using Traefik as a Layer-7 load balancer in combination with both Docker and Let's Encrypt provides you with an extremely flexible, powerful and self-configuring solution for your projects. With Let's Encrypt, your endpoints are automatically secured with production-ready SSL certificates that are renewed automatically as well. diff --git a/docs/user-guide/examples.md b/docs/user-guide/examples.md index d8a8e8698..2d5ab03a8 100644 --- a/docs/user-guide/examples.md +++ b/docs/user-guide/examples.md @@ -1,6 +1,6 @@ # Examples -You will find here some configuration examples of Træfik. +You will find here some configuration examples of Traefik. ## HTTP only @@ -87,7 +87,7 @@ entryPoint = "https" This configuration allows generating Let's Encrypt certificates (thanks to `HTTP-01` challenge) for the four domains `local[1-4].com` with described SANs. -Træfik generates these certificates when it starts and it needs to be restart if new domains are added. +Traefik generates these certificates when it starts and it needs to be restart if new domains are added. ### onHostRule option (with HTTP challenge) @@ -122,9 +122,9 @@ entryPoint = "https" This configuration allows generating Let's Encrypt certificates (thanks to `HTTP-01` challenge) for the four domains `local[1-4].com`. -Træfik generates these certificates when it starts. 
+Traefik generates these certificates when it starts. -If a backend is added with a `onHost` rule, Træfik will automatically generate the Let's Encrypt certificate for the new domain (for frontends wired on the `acme.entryPoint`). +If a backend is added with a `onHost` rule, Traefik will automatically generate the Let's Encrypt certificate for the new domain (for frontends wired on the `acme.entryPoint`). ### OnDemand option (with HTTP challenge) @@ -186,7 +186,7 @@ entryPoint = "https" ``` DNS challenge needs environment variables to be executed. -These variables have to be set on the machine/container that host Træfik. +These variables have to be set on the machine/container that host Traefik. These variables are described [in this section](/configuration/acme/#provider). @@ -219,7 +219,7 @@ entryPoint = "https" ``` DNS challenge needs environment variables to be executed. -These variables have to be set on the machine/container that host Træfik. +These variables have to be set on the machine/container that host Traefik. These variables are described [in this section](/configuration/acme/#provider). @@ -248,7 +248,7 @@ entryPoint = "https" entryPoint = "http" ``` -Træfik will only try to generate a Let's encrypt certificate (thanks to `HTTP-01` challenge) if the domain cannot be checked by the provided certificates. +Traefik will only try to generate a Let's encrypt certificate (thanks to `HTTP-01` challenge) if the domain cannot be checked by the provided certificates. ### Cluster mode diff --git a/docs/user-guide/grpc.md b/docs/user-guide/grpc.md index 55ba25bc9..d1ad358c9 100644 --- a/docs/user-guide/grpc.md +++ b/docs/user-guide/grpc.md @@ -4,9 +4,9 @@ This section explains how to use Traefik as reverse proxy for gRPC application. -### Træfik configuration +### Traefik configuration -At last, we configure our Træfik instance to use both self-signed certificates. +At last, we configure our Traefik instance to use both self-signed certificates. 
```toml defaultEntryPoints = ["https"] @@ -39,7 +39,7 @@ defaultEntryPoints = ["https"] ### Conclusion -We don't need specific configuration to use gRPC in Træfik, we just need to use `h2c` protocol, or use HTTPS communications to have HTTP2 with the backend. +We don't need specific configuration to use gRPC in Traefik, we just need to use `h2c` protocol, or use HTTPS communications to have HTTP2 with the backend. ## With HTTPS @@ -75,9 +75,9 @@ with Common Name (e.g. server FQDN or YOUR name) []: frontend.local ``` -### Træfik configuration +### Traefik configuration -At last, we configure our Træfik instance to use both self-signed certificates. +At last, we configure our Traefik instance to use both self-signed certificates. ```toml defaultEntryPoints = ["https"] @@ -152,7 +152,7 @@ err := s.Serve(lis) // ... ``` -Next we will modify gRPC Client to use our Træfik self-signed certificate: +Next we will modify gRPC Client to use our Traefik self-signed certificate: ```go // ... diff --git a/docs/user-guide/kubernetes.md b/docs/user-guide/kubernetes.md index e08e796c7..e8b196760 100644 --- a/docs/user-guide/kubernetes.md +++ b/docs/user-guide/kubernetes.md @@ -1,6 +1,6 @@ # Kubernetes Ingress Controller -This guide explains how to use Træfik as an Ingress controller for a Kubernetes cluster. +This guide explains how to use Traefik as an Ingress controller for a Kubernetes cluster. If you are not familiar with Ingresses in Kubernetes you might want to read the [Kubernetes user guide](https://kubernetes.io/docs/concepts/services-networking/ingress/) @@ -19,12 +19,12 @@ The config files used in this guide can be found in the [examples directory](htt Kubernetes introduces [Role Based Access Control (RBAC)](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) in 1.6+ to allow fine-grained control of Kubernetes resources and API. -If your cluster is configured with RBAC, you will need to authorize Træfik to use the Kubernetes API. 
There are two ways to set up the proper permission: Via namespace-specific RoleBindings or a single, global ClusterRoleBinding. +If your cluster is configured with RBAC, you will need to authorize Traefik to use the Kubernetes API. There are two ways to set up the proper permission: Via namespace-specific RoleBindings or a single, global ClusterRoleBinding. -RoleBindings per namespace enable to restrict granted permissions to the very namespaces only that Træfik is watching over, thereby following the least-privileges principle. This is the preferred approach if Træfik is not supposed to watch all namespaces, and the set of namespaces does not change dynamically. Otherwise, a single ClusterRoleBinding must be employed. +RoleBindings per namespace enable to restrict granted permissions to the very namespaces only that Traefik is watching over, thereby following the least-privileges principle. This is the preferred approach if Traefik is not supposed to watch all namespaces, and the set of namespaces does not change dynamically. Otherwise, a single ClusterRoleBinding must be employed. !!! note - RoleBindings per namespace are available in Træfik 1.5 and later. Please use ClusterRoleBindings for older versions. + RoleBindings per namespace are available in Traefik 1.5 and later. Please use ClusterRoleBindings for older versions. For the sake of simplicity, this guide will use a ClusterRoleBinding: @@ -74,11 +74,11 @@ subjects: kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-rbac.yaml ``` -For namespaced restrictions, one RoleBinding is required per watched namespace along with a corresponding configuration of Træfik's `kubernetes.namespaces` parameter. +For namespaced restrictions, one RoleBinding is required per watched namespace along with a corresponding configuration of Traefik's `kubernetes.namespaces` parameter. 
-## Deploy Træfik using a Deployment or DaemonSet +## Deploy Traefik using a Deployment or DaemonSet -It is possible to use Træfik with a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) object, +It is possible to use Traefik with a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) or a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) object, whereas both options have their own pros and cons: - The scalability can be much better when using a Deployment, because you will have a Single-Pod-per-Node model when using a DaemonSet, whereas you may need less replicas based on your environment when using a Deployment. @@ -221,7 +221,7 @@ spec: !!! note This will create a Daemonset that uses privileged ports 80/8080 on the host. This may not work on all providers, but illustrates the static (non-NodePort) hostPort binding. The `traefik-ingress-service` can still be used inside the cluster to access the DaemonSet pods. -To deploy Træfik to your cluster start by submitting one of the YAML files to the cluster with `kubectl`: +To deploy Traefik to your cluster start by submitting one of the YAML files to the cluster with `kubectl`: ```shell kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/traefik-deployment.yaml @@ -257,14 +257,14 @@ traefik-ingress-controller-678226159-eqseo 1/1 Running 0 7m ``` You should see that after submitting the Deployment or DaemonSet to Kubernetes it has launched a Pod, and it is now running. -_It might take a few moments for Kubernetes to pull the Træfik image and start the container._ +_It might take a few moments for Kubernetes to pull the Traefik image and start the container._ !!! 
note You could also check the deployment with the Kubernetes dashboard, run `minikube dashboard` to open it in your browser, then choose the `kube-system` namespace from the menu at the top right of the screen. -You should now be able to access Træfik on port 80 of your Minikube instance when using the DaemonSet: +You should now be able to access Traefik on port 80 of your Minikube instance when using the DaemonSet: ```shell curl $(minikube ip) @@ -285,23 +285,23 @@ curl $(minikube ip): ``` !!! note - We expect to see a 404 response here as we haven't yet given Træfik any configuration. + We expect to see a 404 response here as we haven't yet given Traefik any configuration. All further examples below assume a DaemonSet installation. Deployment users will need to append the NodePort when constructing requests. -## Deploy Træfik using Helm Chart +## Deploy Traefik using Helm Chart !!! note - The Helm Chart is maintained by the community, not the Træfik project maintainers. + The Helm Chart is maintained by the community, not the Traefik project maintainers. -Instead of installing Træfik via Kubernetes object directly, you can also use the Træfik Helm chart. +Instead of installing Traefik via Kubernetes object directly, you can also use the Traefik Helm chart. -Install the Træfik chart by: +Install the Traefik chart by: ```shell helm install stable/traefik ``` -Install the Træfik chart using a values.yaml file. +Install the Traefik chart using a values.yaml file. ```shell helm install --values values.yaml stable/traefik @@ -320,7 +320,7 @@ For more information, check out [the documentation](https://github.com/kubernete ## Submitting an Ingress to the Cluster -Lets start by creating a Service and an Ingress that will expose the [Træfik Web UI](https://github.com/containous/traefik#web-ui). +Lets start by creating a Service and an Ingress that will expose the [Traefik Web UI](https://github.com/containous/traefik#web-ui). 
```yaml apiVersion: v1 @@ -367,7 +367,7 @@ You can get the IP address of your minikube instance by running `minikube ip`: echo "$(minikube ip) traefik-ui.minikube" | sudo tee -a /etc/hosts ``` -We should now be able to visit [traefik-ui.minikube](http://traefik-ui.minikube) in the browser and view the Træfik web UI. +We should now be able to visit [traefik-ui.minikube](http://traefik-ui.minikube) in the browser and view the Traefik web UI. ### Add a TLS Certificate to the Ingress @@ -421,7 +421,7 @@ If there are any errors while loading the TLS section of an ingress, the whole i ## Basic Authentication -It's possible to protect access to Træfik through basic authentication. (See the [Kubernetes Ingress](/configuration/backends/kubernetes) configuration page for syntactical details and restrictions.) +It's possible to protect access to Traefik through basic authentication. (See the [Kubernetes Ingress](/configuration/backends/kubernetes) configuration page for syntactical details and restrictions.) ### Creating the Secret @@ -677,7 +677,7 @@ spec: kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheese-ingress.yaml ``` -Now visit the [Træfik dashboard](http://traefik-ui.minikube/) and you should see a frontend for each host. +Now visit the [Traefik dashboard](http://traefik-ui.minikube/) and you should see a frontend for each host. Along with a backend listing for each service with a server set up for each pod. If you edit your `/etc/hosts` again you should be able to access the cheese websites in your browser. @@ -726,7 +726,7 @@ spec: [examples/k8s/cheeses-ingress.yaml](https://github.com/containous/traefik/tree/master/examples/k8s/cheeses-ingress.yaml) !!! note - We are configuring Træfik to strip the prefix from the url path with the `traefik.frontend.rule.type` annotation so that we can use the containers from the previous example without modification. 
+ We are configuring Traefik to strip the prefix from the url path with the `traefik.frontend.rule.type` annotation so that we can use the containers from the previous example without modification. ```shell kubectl apply -f https://raw.githubusercontent.com/containous/traefik/master/examples/k8s/cheeses-ingress.yaml @@ -744,7 +744,7 @@ You should now be able to visit the websites in your browser. ## Multiple Ingress Definitions for the Same Host (or Host+Path) -Træfik will merge multiple Ingress definitions for the same host/path pair into one definition. +Traefik will merge multiple Ingress definitions for the same host/path pair into one definition. Let's say the number of cheese services is growing. It is now time to move the cheese services to a dedicated cheese namespace to simplify the managements of cheese and non-cheese services. @@ -771,7 +771,7 @@ spec: servicePort: http ``` -Træfik will now look for cheddar service endpoints (ports on healthy pods) in both the cheese and the default namespace. +Traefik will now look for cheddar service endpoints (ports on healthy pods) in both the cheese and the default namespace. Deploying cheddar into the cheese namespace and afterwards shutting down cheddar in the default namespace is enough to migrate the traffic. !!! note @@ -824,12 +824,12 @@ Note that priority values must be quoted to avoid numeric interpretation (which ## Forwarding to ExternalNames When specifying an [ExternalName](https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors), -Træfik will forward requests to the given host accordingly and use HTTPS when the Service port matches 443. +Traefik will forward requests to the given host accordingly and use HTTPS when the Service port matches 443. This still requires setting up a proper port mapping on the Service from the Ingress port to the (external) Service port. 
## Disable passing the Host Header -By default Træfik will pass the incoming Host header to the upstream resource. +By default Traefik will pass the incoming Host header to the upstream resource. However, there are times when you may not want this to be the case. For example, if your service is of the ExternalName type. @@ -889,38 +889,38 @@ If you were to visit `example.com/static` the request would then be passed on to ## Partitioning the Ingress object space -By default, Træfik processes every Ingress objects it observes. At times, however, it may be desirable to ignore certain objects. The following sub-sections describe common use cases and how they can be handled with Træfik. +By default, Traefik processes every Ingress objects it observes. At times, however, it may be desirable to ignore certain objects. The following sub-sections describe common use cases and how they can be handled with Traefik. -### Between Træfik and other Ingress controller implementations +### Between Traefik and other Ingress controller implementations -Sometimes Træfik runs along other Ingress controller implementations. One such example is when both Træfik and a cloud provider Ingress controller are active. +Sometimes Traefik runs along other Ingress controller implementations. One such example is when both Traefik and a cloud provider Ingress controller are active. -The `kubernetes.io/ingress.class` annotation can be attached to any Ingress object in order to control whether Træfik should handle it. +The `kubernetes.io/ingress.class` annotation can be attached to any Ingress object in order to control whether Traefik should handle it. -If the annotation is missing, contains an empty value, or the value `traefik`, then the Træfik controller will take responsibility and process the associated Ingress object. +If the annotation is missing, contains an empty value, or the value `traefik`, then the Traefik controller will take responsibility and process the associated Ingress object. 
-It is also possible to set the `ingressClass` option in Træfik to a particular value. Træfik will only process matching Ingress objects. -For instance, setting the option to `traefik-internal` causes Træfik to process Ingress objects with the same `kubernetes.io/ingress.class` annotation value, ignoring all other objects (including those with a `traefik` value, empty value, and missing annotation). +It is also possible to set the `ingressClass` option in Traefik to a particular value. Traefik will only process matching Ingress objects. +For instance, setting the option to `traefik-internal` causes Traefik to process Ingress objects with the same `kubernetes.io/ingress.class` annotation value, ignoring all other objects (including those with a `traefik` value, empty value, and missing annotation). !!! note Letting multiple ingress controllers handle the same ingress objects can lead to unintended behavior. It is recommended to prefix all ingressClass values with `traefik` to avoid unintended collisions with other ingress implementations. -### Between multiple Træfik Deployments +### Between multiple Traefik Deployments -Sometimes multiple Træfik Deployments are supposed to run concurrently. +Sometimes multiple Traefik Deployments are supposed to run concurrently. For instance, it is conceivable to have one Deployment deal with internal and another one with external traffic. -For such cases, it is advisable to classify Ingress objects through a label and configure the `labelSelector` option per each Træfik Deployment accordingly. +For such cases, it is advisable to classify Ingress objects through a label and configure the `labelSelector` option per each Traefik Deployment accordingly. To stick with the internal/external example above, all Ingress objects meant for internal traffic could receive a `traffic-type: internal` label while objects designated for external traffic receive a `traffic-type: external` label. 
-The label selectors on the Træfik Deployments would then be `traffic-type=internal` and `traffic-type=external`, respectively. +The label selectors on the Traefik Deployments would then be `traffic-type=internal` and `traffic-type=external`, respectively. ## Traffic Splitting It is possible to split Ingress traffic in a fine-grained manner between multiple deployments using _service weights_. One canonical use case is canary releases where a deployment representing a newer release is to receive an initially small but ever-increasing fraction of the requests over time. -The way this can be done in Træfik is to specify a percentage of requests that should go into each deployment. +The way this can be done in Traefik is to specify a percentage of requests that should go into each deployment. For instance, say that an application `my-app` runs in version 1. A newer version 2 is about to be released, but confidence in the robustness and reliability of new version running in production can only be gained gradually. @@ -953,7 +953,7 @@ spec: ``` Take note of the `traefik.ingress.kubernetes.io/service-weights` annotation: It specifies the distribution of requests among the referenced backend services, `my-app` and `my-app-canary`. -With this definition, Træfik will route 99% of the requests to the pods backed by the `my-app` deployment, and 1% to those backed by `my-app-canary`. +With this definition, Traefik will route 99% of the requests to the pods backed by the `my-app` deployment, and 1% to those backed by `my-app-canary`. Over time, the ratio may slowly shift towards the canary deployment until it is deemed to replace the previous main application, in steps such as 5%/95%, 10%/90%, 50%/50%, and finally 100%/0%. 
A few conditions must hold for service weights to be applied correctly: @@ -1006,7 +1006,7 @@ The examples shown deliberately do not specify any [resource limitations](https: In a production environment, however, it is important to set proper bounds, especially with regards to CPU: -- too strict and Træfik will be throttled while serving requests (as Kubernetes imposes hard quotas) -- too loose and Træfik may waste resources not available for other containers +- too strict and Traefik will be throttled while serving requests (as Kubernetes imposes hard quotas) +- too loose and Traefik may waste resources not available for other containers When in doubt, you should measure your resource needs, and adjust requests and limits accordingly. diff --git a/docs/user-guide/kv-config.md b/docs/user-guide/kv-config.md index 7fe28ee73..47dba912e 100644 --- a/docs/user-guide/kv-config.md +++ b/docs/user-guide/kv-config.md @@ -2,9 +2,9 @@ Both [static global configuration](/user-guide/kv-config/#static-configuration-in-key-value-store) and [dynamic](/user-guide/kv-config/#dynamic-configuration-in-key-value-store) configuration can be stored in a Key-value store. -This section explains how to launch Træfik using a configuration loaded from a Key-value store. +This section explains how to launch Traefik using a configuration loaded from a Key-value store. -Træfik supports several Key-value stores: +Traefik supports several Key-value stores: - [Consul](https://consul.io) - [etcd](https://coreos.com/etcd/) @@ -20,7 +20,7 @@ We will see the steps to set it up with an easy example. ### docker-compose file for Consul -The Træfik global configuration will be retrieved from a [Consul](https://consul.io) store. +The Traefik global configuration will be retrieved from a [Consul](https://consul.io) store. First we have to launch Consul in a container. 
@@ -56,11 +56,11 @@ whoami4: ### Upload the configuration in the Key-value store -We should now fill the store with the Træfik global configuration. +We should now fill the store with the Traefik global configuration. To do that, we can send the Key-value pairs via [curl commands](https://www.consul.io/intro/getting-started/kv.html) or via the [Web UI](https://www.consul.io/intro/getting-started/ui.html). -Fortunately, Træfik allows automation of this process using the `storeconfig` subcommand. -Please refer to the [store Træfik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it. +Fortunately, Traefik allows automation of this process using the `storeconfig` subcommand. +Please refer to the [store Traefik configuration](/user-guide/kv-config/#store-configuration-in-key-value-store) section to get documentation on it. Here is the toml configuration we would like to store in the Key-value Store : @@ -128,11 +128,11 @@ In case you are setting key values manually: Note that we can either give path to certificate file or directly the file content itself. -### Launch Træfik +### Launch Traefik -We will now launch Træfik in a container. +We will now launch Traefik in a container. -We use CLI flags to setup the connection between Træfik and Consul. +We use CLI flags to setup the connection between Traefik and Consul. All the rest of the global configuration is stored in Consul. Here is the [docker-compose file](https://docs.docker.com/compose/compose-file/) : @@ -156,7 +156,7 @@ This variable must be initialized with the ACL token value. If Traefik is launched into a Docker container, the variable `CONSUL_HTTP_TOKEN` can be initialized with the `-e` Docker option : `-e "CONSUL_HTTP_TOKEN=[consul-acl-token-value]"` -If a Consul ACL is used to restrict Træfik read/write access, one of the following configurations is needed. 
+If a Consul ACL is used to restrict Traefik read/write access, one of the following configurations is needed. - HCL format : @@ -199,7 +199,7 @@ So far, only [Consul](https://consul.io) and [etcd](https://coreos.com/etcd/) su To set it up, we should enable [consul security](https://www.consul.io/docs/internals/security.html) (or [etcd security](https://coreos.com/etcd/docs/latest/security.html)). -Then, we have to provide CA, Cert and Key to Træfik using `consul` flags : +Then, we have to provide CA, Cert and Key to Traefik using `consul` flags : - `--consul.tls` - `--consul.tls.ca=path/to/the/file` @@ -220,10 +220,10 @@ Remember the command `traefik --help` to display the updated list of flags. ## Dynamic configuration in Key-value store -Following our example, we will provide backends/frontends rules and HTTPS certificates to Træfik. +Following our example, we will provide backends/frontends rules and HTTPS certificates to Traefik. !!! note - This section is independent of the way Træfik got its static configuration. + This section is independent of the way Traefik got its static configuration. It means that the static configuration can either come from the same Key-value store or from any other sources. ### Key-value storage structure @@ -360,21 +360,21 @@ And there, the same dynamic configuration in a KV Store (using `prefix = "traefi ### Atomic configuration changes -Træfik can watch the backends/frontends configuration changes and generate its configuration automatically. +Traefik can watch the backends/frontends configuration changes and generate its configuration automatically. !!! note - Only backends/frontends rules are dynamic, the rest of the Træfik configuration stay static. + Only backends/frontends rules are dynamic, the rest of the Traefik configuration stay static. The [Etcd](https://github.com/coreos/etcd/issues/860) and [Consul](https://github.com/hashicorp/consul/issues/886) backends do not support updating multiple keys atomically. 
-As a result, it may be possible for Træfik to read an intermediate configuration state despite judicious use of the `--providersThrottleDuration` flag. -To solve this problem, Træfik supports a special key called `/traefik/alias`. -If set, Træfik use the value as an alternative key prefix. +As a result, it may be possible for Traefik to read an intermediate configuration state despite judicious use of the `--providersThrottleDuration` flag. +To solve this problem, Traefik supports a special key called `/traefik/alias`. +If set, Traefik use the value as an alternative key prefix. !!! note The field `useAPIV3` allows using Etcd V3 API which should support updating multiple keys atomically with Etcd. - Etcd API V2 is deprecated and, in the future, Træfik will support API V3 by default. + Etcd API V2 is deprecated and, in the future, Traefik will support API V3 by default. -Given the key structure below, Træfik will use the `http://172.17.0.2:80` as its only backend (frontend keys have been omitted for brevity). +Given the key structure below, Traefik will use the `http://172.17.0.2:80` as its only backend (frontend keys have been omitted for brevity). | Key | Value | |-------------------------------------------------------------------------|-----------------------------| @@ -411,21 +411,21 @@ Here, we have a 50% balance between the `http://172.17.0.3:80` and the `http://1 | `/traefik_configurations/2/backends/backend1/servers/server2/weight` | `5` | !!! note - Træfik *will not watch for key changes in the `/traefik_configurations` prefix*. It will only watch for changes in the `/traefik/alias`. + Traefik *will not watch for key changes in the `/traefik_configurations` prefix*. It will only watch for changes in the `/traefik/alias`. Further, if the `/traefik/alias` key is set, all other configuration with `/traefik/backends` or `/traefik/frontends` prefix are ignored. ## Store configuration in Key-value store !!! 
note - Don't forget to [setup the connection between Træfik and Key-value store](/user-guide/kv-config/#launch-trfik). + Don't forget to [setup the connection between Traefik and Key-value store](/user-guide/kv-config/#launch-traefik). -The static Træfik configuration in a key-value store can be automatically created and updated, using the [`storeconfig` subcommand](/basics/#commands). +The static Traefik configuration in a key-value store can be automatically created and updated, using the [`storeconfig` subcommand](/basics/#commands). ```bash traefik storeconfig [flags] ... ``` This command is here only to automate the [process which upload the configuration into the Key-value store](/user-guide/kv-config/#upload-the-configuration-in-the-key-value-store). -Træfik will not start but the [static configuration](/basics/#static-trfik-configuration) will be uploaded into the Key-value store. +Traefik will not start but the [static configuration](/basics/#static-traefik-configuration) will be uploaded into the Key-value store. If you configured ACME (Let's Encrypt), your registration account and your certificates will also be uploaded. diff --git a/docs/user-guide/swarm-mode.md b/docs/user-guide/swarm-mode.md index 0ffbbf799..9b23ac60e 100644 --- a/docs/user-guide/swarm-mode.md +++ b/docs/user-guide/swarm-mode.md @@ -1,6 +1,6 @@ # Docker Swarm (mode) cluster -This section explains how to create a multi-host docker cluster with swarm mode using [docker-machine](https://docs.docker.com/machine) and how to deploy Træfik on it. +This section explains how to create a multi-host docker cluster with swarm mode using [docker-machine](https://docs.docker.com/machine) and how to deploy Traefik on it. The cluster consists of: @@ -66,17 +66,17 @@ ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS fnpj8ozfc85zvahx2r540xfcf * manager Ready Active Leader ``` -Finally, let's create a network for Træfik to use. +Finally, let's create a network for Traefik to use. 
```shell docker-machine ssh manager "docker network create --driver=overlay traefik-net" ``` -## Deploy Træfik +## Deploy Traefik -Let's deploy Træfik as a docker service in our cluster. -The only requirement for Træfik to work with swarm mode is that it needs to run on a manager node - we are going to use a [constraint](https://docs.docker.com/engine/reference/commandline/service_create/#/specify-service-constraints-constraint) for that. +Let's deploy Traefik as a docker service in our cluster. +The only requirement for Traefik to work with swarm mode is that it needs to run on a manager node - we are going to use a [constraint](https://docs.docker.com/engine/reference/commandline/service_create/#/specify-service-constraints-constraint) for that. ```shell docker-machine ssh manager "docker service create \ @@ -98,10 +98,10 @@ Let's explain this command: | Option | Description | |-----------------------------------------------------------------------------|------------------------------------------------------------------------------------------------| | `--publish 80:80 --publish 8080:8080` | we publish port `80` and `8080` on the cluster. | -| `--constraint=node.role==manager` | we ask docker to schedule Træfik on a manager node. | -| `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. | -| `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. | -| `--docker` | enable docker provider, and `--docker.swarmMode` to enable the swarm mode on Træfik. | +| `--constraint=node.role==manager` | we ask docker to schedule Traefik on a manager node. | +| `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Traefik is scheduled to be able to speak to the daemon. 
| +| `--network traefik-net` | we attach the Traefik service (and thus the underlying container) to the `traefik-net` network. | +| `--docker` | enable docker provider, and `--docker.swarmMode` to enable the swarm mode on Traefik. | | `--api` | activate the webUI on port 8080 | @@ -145,7 +145,7 @@ z9re2mnl34k4 whoami1 replicated 1/1 emilevauge/whoami:latest ``` -## Access to your apps through Træfik +## Access to your apps through Traefik ```shell curl -H Host:whoami0.traefik http://$(docker-machine ip manager) @@ -187,7 +187,7 @@ X-Forwarded-Server: 77fc29c69fe4 ``` !!! note - As Træfik is published, you can access it from any machine and not only the manager. + As Traefik is published, you can access it from any machine and not only the manager. ```shell curl -H Host:whoami0.traefik http://$(docker-machine ip worker1) @@ -247,7 +247,7 @@ ysil6oto1wim whoami0 replicated 5/5 emilevauge/whoami:latest z9re2mnl34k4 whoami1 replicated 5/5 emilevauge/whoami:latest ``` -## Access to your `whoami0` through Træfik multiple times. +## Access to your `whoami0` through Traefik multiple times. Repeat the following command multiple times and note that the Hostname changes each time as Traefik load balances each request against the 5 tasks: diff --git a/docs/user-guide/swarm.md b/docs/user-guide/swarm.md index fa018476e..e08c6ff08 100644 --- a/docs/user-guide/swarm.md +++ b/docs/user-guide/swarm.md @@ -1,6 +1,6 @@ # Swarm cluster -This section explains how to create a multi-host [swarm](https://docs.docker.com/swarm) cluster using [docker-machine](https://docs.docker.com/machine/) and how to deploy Træfik on it. +This section explains how to create a multi-host [swarm](https://docs.docker.com/swarm) cluster using [docker-machine](https://docs.docker.com/machine/) and how to deploy Traefik on it. 
The cluster consists of: @@ -71,9 +71,9 @@ eval $(docker-machine env --swarm mhs-demo0) docker network create --driver overlay --subnet=10.0.9.0/24 my-net ``` -## Deploy Træfik +## Deploy Traefik -Deploy Træfik: +Deploy Traefik: ```shell docker $(docker-machine config mhs-demo0) run \ @@ -132,7 +132,7 @@ ba2c21488299 emilevauge/whoami "/whoamI" 8 seconds ago 8fbc39271b4c traefik "/traefik -l DEBUG -c" 36 seconds ago Up 37 seconds 192.168.99.101:80->80/tcp, 192.168.99.101:8080->8080/tcp mhs-demo0/serene_bhabha ``` -## Access to your apps through Træfik +## Access to your apps through Traefik ```shell curl -H Host:whoami0.traefik http://$(docker-machine ip mhs-demo0) diff --git a/examples/quickstart/README.md b/examples/quickstart/README.md index fa3690cbd..26ca61cf4 100644 --- a/examples/quickstart/README.md +++ b/examples/quickstart/README.md @@ -1,12 +1,12 @@ -## The Træfik Quickstart (Using Docker) +## The Traefik Quickstart (Using Docker) In this quickstart, we'll use [Docker compose](https://docs.docker.com/compose) to create our demo infrastructure. -To save some time, you can clone [Træfik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory. +To save some time, you can clone [Traefik's repository](https://github.com/containous/traefik) and use the quickstart files located in the [examples/quickstart](https://github.com/containous/traefik/tree/master/examples/quickstart/) directory. 
-### 1 — Launch Træfik — Tell It to Listen to Docker +### 1 — Launch Traefik — Tell It to Listen to Docker -Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Træfik image: +Create a `docker-compose.yml` file where you will define a `reverse-proxy` service that uses the official Traefik image: ```yaml version: '3' @@ -14,7 +14,7 @@ version: '3' services: reverse-proxy: image: traefik # The official Traefik docker image - command: --api --docker # Enables the web UI and tells Træfik to listen to docker + command: --api --docker # Enables the web UI and tells Traefik to listen to docker ports: - "80:80" # The HTTP port - "8080:8080" # The Web UI (enabled by --api) @@ -22,7 +22,7 @@ services: - /var/run/docker.sock:/var/run/docker.sock #So that Traefik can listen to the Docker events ``` -**That's it. Now you can launch Træfik!** +**That's it. Now you can launch Traefik!** Start your `reverse-proxy` with the following command: @@ -30,11 +30,11 @@ Start your `reverse-proxy` with the following command: docker-compose up -d reverse-proxy ``` -You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Træfik's dashboard (we'll go back there once we have launched a service in step 2). +You can open a browser and go to [http://localhost:8080](http://localhost:8080) to see Traefik's dashboard (we'll go back there once we have launched a service in step 2). -### 2 — Launch a Service — Træfik Detects It and Creates a Route for You +### 2 — Launch a Service — Traefik Detects It and Creates a Route for You -Now that we have a Træfik instance up and running, we will deploy new services. +Now that we have a Traefik instance up and running, we will deploy new services. Edit your `docker-compose.yml` file and add the following at the end of your file. 
@@ -54,7 +54,7 @@ Start the `whoami` service with the following command: docker-compose up -d whoami ``` -Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new container and updated its own configuration. +Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Traefik has automatically detected the new container and updated its own configuration. When Traefik detects new services, it creates the corresponding routes so you can call them ... _let's see!_ (Here, we're using curl) @@ -77,9 +77,9 @@ Run more instances of your `whoami` service with the following command: docker-compose up -d --scale whoami=2 ``` -Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Træfik has automatically detected the new instance of the container. +Go back to your browser ([http://localhost:8080](http://localhost:8080)) and see that Traefik has automatically detected the new instance of the container. -Finally, see that Træfik load-balances between the two instances of your services by running twice the following command: +Finally, see that Traefik load-balances between the two instances of your services by running twice the following command: ```shell curl -H Host:whoami.docker.localhost http://127.0.0.1 @@ -99,9 +99,9 @@ IP: 172.27.0.4 # ... ``` -### 4 — Enjoy Træfik's Magic +### 4 — Enjoy Traefik's Magic -Now that you have a basic understanding of how Træfik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/) and let Træfik work for you! -Whatever your infrastructure is, there is probably [an available Træfik backend](https://docs.traefik.io/#supported-backends) that will do the job. 
+Now that you have a basic understanding of how Traefik can automatically create the routes to your services and load balance them, it might be time to dive into [the documentation](https://docs.traefik.io/) and let Traefik work for you! +Whatever your infrastructure is, there is probably [an available Traefik backend](https://docs.traefik.io/#supported-backends) that will do the job. -Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Træfik's let's encrypt integration](https://docs.traefik.io/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/user-guide/docker-and-lets-encrypt/). +Our recommendation would be to see for yourself how simple it is to enable HTTPS with [Traefik's let's encrypt integration](https://docs.traefik.io/user-guide/examples/#lets-encrypt-support) using the dedicated [user guide](https://docs.traefik.io/user-guide/docker-and-lets-encrypt/). diff --git a/examples/quickstart/docker-compose.yml b/examples/quickstart/docker-compose.yml index bd1c8a202..4d9cfc608 100644 --- a/examples/quickstart/docker-compose.yml +++ b/examples/quickstart/docker-compose.yml @@ -1,10 +1,10 @@ version: '3' services: - # The reverse proxy service (Træfik) + # The reverse proxy service (Traefik) reverse-proxy: image: traefik # The official Traefik docker image - command: --api --docker # Enables the web UI and tells Træfik to listen to docker + command: --api --docker # Enables the web UI and tells Traefik to listen to docker ports: - "80:80" # The HTTP port - "8080:8080" # The Web UI (enabled by --api) diff --git a/integration/consul_test.go b/integration/consul_test.go index 39a321db6..ef1f274d2 100644 --- a/integration/consul_test.go +++ b/integration/consul_test.go @@ -478,7 +478,7 @@ func datastoreContains(datastore *cluster.Datastore, expectedValue string) func( func (s *ConsulSuite) TestSNIDynamicTlsConfig(c *check.C) { s.setupConsul(c) consulHost := 
s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - // start Træfik + // start Traefik file := s.adaptFile(c, "fixtures/consul/simple_https.toml", struct{ ConsulHost string }{consulHost}) defer os.Remove(file) cmd, display := s.traefikCmd(withConfigFile(file)) diff --git a/integration/etcd3_test.go b/integration/etcd3_test.go index ad877469b..6d5c859a6 100644 --- a/integration/etcd3_test.go +++ b/integration/etcd3_test.go @@ -428,7 +428,7 @@ func (s *Etcd3Suite) TestCommandStoreConfig(c *check.C) { } func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) { - // start Træfik + // start Traefik cmd, display := s.traefikCmd( withConfigFile("fixtures/etcd/simple_https.toml"), "--etcd", @@ -566,7 +566,7 @@ func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) { } func (s *Etcd3Suite) TestDeleteSNIDynamicTlsConfig(c *check.C) { - // start Træfik + // start Traefik cmd, display := s.traefikCmd( withConfigFile("fixtures/etcd/simple_https.toml"), "--etcd", diff --git a/integration/etcd_test.go b/integration/etcd_test.go index 1da3f7408..2c70beab8 100644 --- a/integration/etcd_test.go +++ b/integration/etcd_test.go @@ -155,7 +155,7 @@ func (s *EtcdSuite) TestNominalConfiguration(c *check.C) { }) c.Assert(err, checker.IsNil) - // wait for Træfik + // wait for Traefik err = try.GetRequest("http://127.0.0.1:8081/api/providers", 60*time.Second, try.BodyContains("Path:/test")) c.Assert(err, checker.IsNil) @@ -213,7 +213,7 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) { }) c.Assert(err, checker.IsNil) - // start Træfik + // start Traefik cmd, display := s.traefikCmd( withConfigFile("fixtures/simple_web.toml"), "--etcd", @@ -293,7 +293,7 @@ func (s *EtcdSuite) TestGlobalConfiguration(c *check.C) { func (s *EtcdSuite) TestCertificatesContentWithSNIConfigHandshake(c *check.C) { etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress - // start Træfik + // start Traefik cmd, display := s.traefikCmd( 
withConfigFile("fixtures/simple_web.toml"), "--etcd", @@ -411,7 +411,7 @@ func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) { err := cmd.Start() c.Assert(err, checker.IsNil) - // wait for Træfik finish without error + // wait for Traefik finish without error cmd.Wait() // CHECK @@ -437,7 +437,7 @@ func (s *EtcdSuite) TestCommandStoreConfig(c *check.C) { func (s *EtcdSuite) TestSNIDynamicTlsConfig(c *check.C) { etcdHost := s.composeProject.Container(c, "etcd").NetworkSettings.IPAddress - // start Træfik + // start Traefik cmd, display := s.traefikCmd( withConfigFile("fixtures/etcd/simple_https.toml"), "--etcd", diff --git a/middlewares/accesslog/logger_formatters.go b/middlewares/accesslog/logger_formatters.go index 4cad206b6..4755079fe 100644 --- a/middlewares/accesslog/logger_formatters.go +++ b/middlewares/accesslog/logger_formatters.go @@ -14,10 +14,10 @@ const ( defaultValue = "-" ) -// CommonLogFormatter provides formatting in the Træfik common log format +// CommonLogFormatter provides formatting in the Traefik common log format type CommonLogFormatter struct{} -// Format formats the log entry in the Træfik common log format +// Format formats the log entry in the Traefik common log format func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { b := &bytes.Buffer{} diff --git a/mkdocs.yml b/mkdocs.yml index 6de641a2a..1ab580e2c 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,5 +1,5 @@ -site_name: Træfik -site_description: Træfik Documentation +site_name: Traefik +site_description: Traefik Documentation site_author: containo.us site_url: https://docs.traefik.io dev_addr: 0.0.0.0:8000 diff --git a/provider/docker/docker.go b/provider/docker/docker.go index 250430998..1da8e0e05 100644 --- a/provider/docker/docker.go +++ b/provider/docker/docker.go @@ -396,7 +396,7 @@ func parseService(service swarmtypes.Service, networkMap map[string]*dockertypes if service.Spec.EndpointSpec != nil { if service.Spec.EndpointSpec.Mode == 
swarmtypes.ResolutionModeDNSRR { if isBackendLBSwarm(dData) { - log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Træfik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name) + log.Warnf("Ignored %s endpoint-mode not supported, service name: %s. Fallback to Traefik load balancing", swarmtypes.ResolutionModeDNSRR, service.Spec.Annotations.Name) } } else if service.Spec.EndpointSpec.Mode == swarmtypes.ResolutionModeVIP { dData.NetworkSettings.Networks = make(map[string]*networkData) diff --git a/provider/label/label.go b/provider/label/label.go index b8f03c855..b0069eed3 100644 --- a/provider/label/label.go +++ b/provider/label/label.go @@ -162,7 +162,7 @@ func HasPrefix(labels map[string]string, prefix string) bool { return false } -// IsEnabled Check if a container is enabled in Træfik +// IsEnabled Check if a container is enabled in Traefik func IsEnabled(labels map[string]string, exposedByDefault bool) bool { return GetBoolValue(labels, TraefikEnable, exposedByDefault) } diff --git a/server/server_middlewares.go b/server/server_middlewares.go index bd9b97fa3..ae62f993d 100644 --- a/server/server_middlewares.go +++ b/server/server_middlewares.go @@ -309,7 +309,7 @@ func buildIPWhiteLister(whiteList *types.WhiteList, wlRange []string) (*middlewa func (s *Server) wrapNegroniHandlerWithAccessLog(handler negroni.Handler, frontendName string) negroni.Handler { if s.accessLoggerMiddleware != nil { - saveBackend := accesslog.NewSaveNegroniBackend(handler, "Træfik") + saveBackend := accesslog.NewSaveNegroniBackend(handler, "Traefik") saveFrontend := accesslog.NewSaveNegroniFrontend(saveBackend, frontendName) return saveFrontend } @@ -318,7 +318,7 @@ func (s *Server) wrapNegroniHandlerWithAccessLog(handler negroni.Handler, fronte func (s *Server) wrapHTTPHandlerWithAccessLog(handler http.Handler, frontendName string) http.Handler { if s.accessLoggerMiddleware != nil { - saveBackend := 
accesslog.NewSaveBackend(handler, "Træfik") + saveBackend := accesslog.NewSaveBackend(handler, "Traefik") saveFrontend := accesslog.NewSaveFrontend(saveBackend, frontendName) return saveFrontend } diff --git a/webui/readme.md b/webui/readme.md index e1ab0214e..6374851f2 100644 --- a/webui/readme.md +++ b/webui/readme.md @@ -1,10 +1,10 @@ -# Træfik Web UI +# Traefik Web UI -Access to Træfik Web UI, ex: http://localhost:8080 +Access to Traefik Web UI, ex: http://localhost:8080 ## Interface -Træfik Web UI provide 2 types of informations: +Traefik Web UI provide 2 types of informations: - Providers with their backends and frontends information. - Health of the web server. From 46ce80762451e0a986d750574da789873939c6db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A9rald=20Cro=C3=ABs?= Date: Wed, 17 Oct 2018 16:30:04 +0200 Subject: [PATCH 05/29] Adds the note: acme.domains is a startup configuration --- docs/configuration/acme.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index a0ede414d..7b453b248 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -144,6 +144,7 @@ entryPoint = "https" # Domains list. # Only domains defined here can generate wildcard certificates. +# The certificates for these domains are negotiated at traefik startup only. # # [[acme.domains]] # main = "local1.com" @@ -302,6 +303,9 @@ You can provide SANs (alternative domains) to each main domain. All domains must have A/AAAA records pointing to Traefik. Each domain & SAN will lead to a certificate request. +!!! note + The certificates for the domains listed in `acme.domains` are negotiated at traefik startup only. + ```toml [acme] # ... 
From 37d8e32e0bd2bfce3738b46607307d7c4b8862e1 Mon Sep 17 00:00:00 2001 From: Nick Maliwacki Date: Thu, 18 Oct 2018 07:42:03 -0700 Subject: [PATCH 06/29] clarify DuckDNS does not support multiple TXT records --- docs/configuration/acme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index 7b453b248..cfa527b8a 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -265,7 +265,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification, | [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet | | [DNSPod](http://www.dnspod.net/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet | | [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES | -| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | Not tested yet | +| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | No | | [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet | | External Program | `exec` | `EXEC_PATH` | Not tested yet | | [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | From 3f044c48fad3fbf7f2e70a28afc7fc2ddb3af85d Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 23 Oct 2018 10:10:04 +0200 Subject: [PATCH 07/29] Nil request body with retry --- healthcheck/healthcheck.go | 2 +- middlewares/auth/forward.go | 2 +- middlewares/errorpages/error_pages.go | 2 +- middlewares/retry.go | 3 +++ 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/healthcheck/healthcheck.go b/healthcheck/healthcheck.go index 8a6e9bba0..222c6328a 100644 --- a/healthcheck/healthcheck.go +++ b/healthcheck/healthcheck.go @@ -71,7 +71,7 @@ func (b *BackendConfig) newRequest(serverURL *url.URL) (*http.Request, error) { u.Path += 
b.Path - return http.NewRequest(http.MethodGet, u.String(), nil) + return http.NewRequest(http.MethodGet, u.String(), http.NoBody) } // this function adds additional http headers and hostname to http.request diff --git a/middlewares/auth/forward.go b/middlewares/auth/forward.go index 5292c282c..7afc767c6 100644 --- a/middlewares/auth/forward.go +++ b/middlewares/auth/forward.go @@ -40,7 +40,7 @@ func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next } } - forwardReq, err := http.NewRequest(http.MethodGet, config.Address, nil) + forwardReq, err := http.NewRequest(http.MethodGet, config.Address, http.NoBody) tracing.LogRequest(tracing.GetSpan(r), forwardReq) if err != nil { tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause %s", config.Address, err) diff --git a/middlewares/errorpages/error_pages.go b/middlewares/errorpages/error_pages.go index 9fbe84706..10f241bb4 100644 --- a/middlewares/errorpages/error_pages.go +++ b/middlewares/errorpages/error_pages.go @@ -120,7 +120,7 @@ func newRequest(baseURL string) (*http.Request, error) { return nil, fmt.Errorf("error pages: error when parse URL: %v", err) } - req, err := http.NewRequest(http.MethodGet, u.String(), nil) + req, err := http.NewRequest(http.MethodGet, u.String(), http.NoBody) if err != nil { return nil, fmt.Errorf("error pages: error when create query: %v", err) } diff --git a/middlewares/retry.go b/middlewares/retry.go index ed9f339d9..79a05d900 100644 --- a/middlewares/retry.go +++ b/middlewares/retry.go @@ -35,6 +35,9 @@ func (retry *Retry) ServeHTTP(rw http.ResponseWriter, r *http.Request) { // cf https://github.com/containous/traefik/issues/1008 if retry.attempts > 1 { body := r.Body + if body == nil { + body = http.NoBody + } defer body.Close() r.Body = ioutil.NopCloser(body) } From 8e9b8a09536dbd02b4e565dd2221f861f9e582e6 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 23 Oct 2018 11:18:02 +0200 Subject: [PATCH 08/29] fix: netcup and DuckDNS. 
--- Gopkg.lock | 30 +- docs/configuration/acme.md | 89 ++-- .../go-autorest/autorest/azure/auth/auth.go | 408 ++++++++++++++++++ vendor/github.com/dimchansky/utfbom/LICENSE | 201 +++++++++ vendor/github.com/dimchansky/utfbom/utfbom.go | 174 ++++++++ .../edeckers/auroradnsclient/client.go | 22 - .../edeckers/auroradnsclient/errors.go | 11 - .../edeckers/auroradnsclient/records.go | 75 ---- .../auroradnsclient/records/datatypes.go | 31 -- .../auroradnsclient/requests/errors/errors.go | 19 - .../auroradnsclient/requests/requestor.go | 124 ------ .../auroradnsclient/tokens/generator.go | 35 -- .../edeckers/auroradnsclient/zones.go | 29 -- .../auroradnsclient/zones/datatypes.go | 7 - .../go-auroradns}/LICENSE | 0 vendor/github.com/ldez/go-auroradns/auth.go | 98 +++++ vendor/github.com/ldez/go-auroradns/client.go | 144 +++++++ .../github.com/ldez/go-auroradns/records.go | 91 ++++ vendor/github.com/ldez/go-auroradns/zones.go | 69 +++ .../xenolf/lego/acme/dns_challenge.go | 9 +- .../lego/providers/dns/auroradns/auroradns.go | 45 +- .../xenolf/lego/providers/dns/azure/azure.go | 165 ++++--- .../lego/providers/dns/dnsmadeeasy/client.go | 15 +- .../providers/dns/dnsmadeeasy/dnsmadeeasy.go | 21 +- .../lego/providers/dns/dreamhost/dreamhost.go | 2 +- .../lego/providers/dns/duckdns/duckdns.go | 37 +- .../lego/providers/dns/netcup/client.go | 229 +++++----- .../lego/providers/dns/netcup/netcup.go | 93 ++-- .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 +++ vendor/golang.org/x/crypto/pkcs12/crypto.go | 131 ++++++ vendor/golang.org/x/crypto/pkcs12/errors.go | 23 + .../x/crypto/pkcs12/internal/rc2/rc2.go | 271 ++++++++++++ vendor/golang.org/x/crypto/pkcs12/mac.go | 45 ++ vendor/golang.org/x/crypto/pkcs12/pbkdf.go | 170 ++++++++ vendor/golang.org/x/crypto/pkcs12/pkcs12.go | 346 +++++++++++++++ vendor/golang.org/x/crypto/pkcs12/safebags.go | 57 +++ 36 files changed, 2716 insertions(+), 650 deletions(-) create mode 100644 
vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go create mode 100644 vendor/github.com/dimchansky/utfbom/LICENSE create mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/client.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/errors.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/records.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/records/datatypes.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/requests/errors/errors.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/requests/requestor.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/tokens/generator.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/zones.go delete mode 100644 vendor/github.com/edeckers/auroradnsclient/zones/datatypes.go rename vendor/github.com/{edeckers/auroradnsclient => ldez/go-auroradns}/LICENSE (100%) create mode 100644 vendor/github.com/ldez/go-auroradns/auth.go create mode 100644 vendor/github.com/ldez/go-auroradns/client.go create mode 100644 vendor/github.com/ldez/go-auroradns/records.go create mode 100644 vendor/github.com/ldez/go-auroradns/zones.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/bmp-string.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/crypto.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/errors.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/mac.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pbkdf.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/pkcs12.go create mode 100644 vendor/golang.org/x/crypto/pkcs12/safebags.go diff --git a/Gopkg.lock b/Gopkg.lock index c8b9f4239..e8b4586ec 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -49,6 +49,7 @@ "autorest", "autorest/adal", "autorest/azure", + "autorest/azure/auth", "autorest/date", "autorest/to" ] @@ -363,6 
+364,12 @@ revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" version = "v3.2.0" +[[projects]] + name = "github.com/dimchansky/utfbom" + packages = ["."] + revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c" + version = "v1.0.0" + [[projects]] branch = "master" name = "github.com/dnsimple/dnsimple-go" @@ -533,19 +540,6 @@ revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98" version = "v1.1.0" -[[projects]] - name = "github.com/edeckers/auroradnsclient" - packages = [ - ".", - "records", - "requests", - "requests/errors", - "tokens", - "zones" - ] - revision = "1563e622aaca0a8bb895a448f31d4a430ab97586" - version = "v1.0.3" - [[projects]] branch = "master" name = "github.com/elazarl/go-bindata-assetfs" @@ -863,6 +857,12 @@ packages = ["."] revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" +[[projects]] + name = "github.com/ldez/go-auroradns" + packages = ["."] + revision = "b40dfcae7c417f8129579362695dc1f3cfe5928d" + version = "v2.0.0" + [[projects]] branch = "master" name = "github.com/libkermit/compose" @@ -1398,7 +1398,7 @@ "providers/dns/vegadns", "providers/dns/vultr" ] - revision = "160d6fe60303699067faad57dc0b1e147ac499ef" + revision = "1151b4e3befc51b7b215179c87791753721dc6d5" [[projects]] branch = "master" @@ -1410,6 +1410,8 @@ "ed25519/internal/edwards25519", "ocsp", "pbkdf2", + "pkcs12", + "pkcs12/internal/rc2", "scrypt", "ssh/terminal" ] diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index cfa527b8a..6cb617744 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -252,50 +252,51 @@ Useful if internal networks block external DNS queries. Here is a list of supported `provider`s, that can automate the DNS verification, along with the required environment variables and their [wildcard & root domain support](/configuration/acme/#wildcard-domains) for each. Do not hesitate to complete it. 
-| Provider Name | Provider Code | Environment Variables | Wildcard & Root Domain Support | -|--------------------------------------------------------|----------------|---------------------------------------------------------------------------------------------------------------------------------|--------------------------------| -| [Alibaba Cloud](https://www.vultr.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet | -| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` | Not tested yet | -| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP` | Not tested yet | -| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | `BLUECAT_SERVER_URL`, `BLUECAT_USER_NAME`, `BLUECAT_PASSWORD`, `BLUECAT_CONFIG_NAME`, `BLUECAT_DNS_VIEW` | Not tested yet | -| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | `CF_API_EMAIL`, `CF_API_KEY` - The `Global API Key` needs to be used, not the `Origin CA Key` | YES | -| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | `CLOUDXNS_API_KEY`, `CLOUDXNS_SECRET_KEY` | Not tested yet | -| [DigitalOcean](https://www.digitalocean.com) | `digitalocean` | `DO_AUTH_TOKEN` | YES | -| [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` | Not tested yet | -| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet | -| [DNSPod](http://www.dnspod.net/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet | -| [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES | -| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | No | -| [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet | -| External 
Program | `exec` | `EXEC_PATH` | Not tested yet | -| [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | -| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet | -| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet | -| [Gandi v5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES | -| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet | -| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet | -| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES | -| [hosting.de](https://www.hosting.de) | `hostingde` | `HOSTINGDE_API_KEY`, `HOSTINGDE_ZONE_NAME` | Not tested yet | -| [IIJ](https://www.iij.ad.jp/) | `iij` | `IIJ_API_ACCESS_KEY`, `IIJ_API_SECRET_KEY`, `IIJ_DO_SERVICE_CODE` | Not tested yet | -| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet | -| [Linode](https://www.linode.com) | `linode` | `LINODE_API_KEY` | Not tested yet | -| [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet | -| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press Enter. 
| YES | -| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES | -| [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet | -| [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet | -| [NIFCloud](https://cloud.nifty.com/service/dns.htm) | `nifcloud` | `NIFCLOUD_ACCESS_KEY_ID`, `NIFCLOUD_SECRET_ACCESS_KEY` | Not tested yet | -| [Ns1](https://ns1.com/) | `ns1` | `NS1_API_KEY` | Not tested yet | -| [Open Telekom Cloud](https://cloud.telekom.de) | `otc` | `OTC_DOMAIN_NAME`, `OTC_USER_NAME`, `OTC_PASSWORD`, `OTC_PROJECT_NAME`, `OTC_IDENTITY_ENDPOINT` | Not tested yet | -| [OVH](https://www.ovh.com) | `ovh` | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` | YES | -| [PowerDNS](https://www.powerdns.com) | `pdns` | `PDNS_API_KEY`, `PDNS_API_URL` | Not tested yet | -| [Rackspace](https://www.rackspace.com/cloud/dns) | `rackspace` | `RACKSPACE_USER`, `RACKSPACE_API_KEY` | Not tested yet | -| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | `RFC2136_TSIG_KEY`, `RFC2136_TSIG_SECRET`, `RFC2136_TSIG_ALGORITHM`, `RFC2136_NAMESERVER` | Not tested yet | -| [Route 53](https://aws.amazon.com/route53/) | `route53` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `[AWS_REGION]`, `[AWS_HOSTED_ZONE_ID]` or a configured user/instance IAM profile. 
| YES | -| [Sakura Cloud](https://cloud.sakura.ad.jp/) | `sakuracloud` | `SAKURACLOUD_ACCESS_TOKEN`, `SAKURACLOUD_ACCESS_TOKEN_SECRET` | Not tested yet | -| [Stackpath](https://www.stackpath.com/) | `stackpath` | `STACKPATH_CLIENT_ID`, `STACKPATH_CLIENT_SECRET`, `STACKPATH_STACK_ID` | Not tested yet | -| [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet | -| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet | +| Provider Name | Provider Code | Environment Variables | Wildcard & Root Domain Support | +|--------------------------------------------------------|----------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------| +| [ACME DNS](https://github.com/joohoi/acme-dns) | `acmedns` | `ACME_DNS_API_BASE`, `ACME_DNS_STORAGE_PATH` | Not tested yet | +| [Alibaba Cloud](https://www.vultr.com) | `alidns` | `ALICLOUD_ACCESS_KEY`, `ALICLOUD_SECRET_KEY`, `ALICLOUD_REGION_ID` | Not tested yet | +| [Auroradns](https://www.pcextreme.com/aurora/dns) | `auroradns` | `AURORA_USER_ID`, `AURORA_KEY`, `AURORA_ENDPOINT` | Not tested yet | +| [Azure](https://azure.microsoft.com/services/dns/) | `azure` | `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_SUBSCRIPTION_ID`, `AZURE_TENANT_ID`, `AZURE_RESOURCE_GROUP`, `[AZURE_METADATA_ENDPOINT]` | Not tested yet | +| [Blue Cat](https://www.bluecatnetworks.com/) | `bluecat` | `BLUECAT_SERVER_URL`, `BLUECAT_USER_NAME`, `BLUECAT_PASSWORD`, `BLUECAT_CONFIG_NAME`, `BLUECAT_DNS_VIEW` | Not tested yet | +| [Cloudflare](https://www.cloudflare.com) | `cloudflare` | `CF_API_EMAIL`, `CF_API_KEY` - The `Global API Key` needs to be used, not the `Origin CA Key` | YES | +| [CloudXNS](https://www.cloudxns.net) | `cloudxns` | `CLOUDXNS_API_KEY`, `CLOUDXNS_SECRET_KEY` | Not tested yet | +| 
[DigitalOcean](https://www.digitalocean.com) | `digitalocean` | `DO_AUTH_TOKEN` | YES | +| [DNSimple](https://dnsimple.com) | `dnsimple` | `DNSIMPLE_OAUTH_TOKEN`, `DNSIMPLE_BASE_URL` | Not tested yet | +| [DNS Made Easy](https://dnsmadeeasy.com) | `dnsmadeeasy` | `DNSMADEEASY_API_KEY`, `DNSMADEEASY_API_SECRET`, `DNSMADEEASY_SANDBOX` | Not tested yet | +| [DNSPod](http://www.dnspod.net/) | `dnspod` | `DNSPOD_API_KEY` | Not tested yet | +| [DreamHost](https://www.dreamhost.com/) | `dreamhost` | `DREAMHOST_API_KEY` | YES | +| [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | No | +| [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet | +| External Program | `exec` | `EXEC_PATH` | Not tested yet | +| [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | +| [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet | +| [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet | +| [Gandi v5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES | +| [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet | +| [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet | +| [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES | +| [hosting.de](https://www.hosting.de) | `hostingde` | `HOSTINGDE_API_KEY`, `HOSTINGDE_ZONE_NAME` | Not tested yet | +| [IIJ](https://www.iij.ad.jp/) | `iij` | `IIJ_API_ACCESS_KEY`, `IIJ_API_SECRET_KEY`, `IIJ_DO_SERVICE_CODE` | Not tested yet | +| [Lightsail](https://aws.amazon.com/lightsail/) | `lightsail` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `DNS_ZONE` | Not tested yet | +| [Linode](https://www.linode.com) | `linode` | 
`LINODE_API_KEY` | Not tested yet | +| [Linode v4](https://www.linode.com) | `linodev4` | `LINODE_TOKEN` | Not tested yet | +| manual | - | none, but you need to run Traefik interactively, turn on `acmeLogging` to see instructions and press Enter. | YES | +| [Namecheap](https://www.namecheap.com) | `namecheap` | `NAMECHEAP_API_USER`, `NAMECHEAP_API_KEY` | YES | +| [name.com](https://www.name.com/) | `namedotcom` | `NAMECOM_USERNAME`, `NAMECOM_API_TOKEN`, `NAMECOM_SERVER` | Not tested yet | +| [Netcup](https://www.netcup.eu/) | `netcup` | `NETCUP_CUSTOMER_NUMBER`, `NETCUP_API_KEY`, `NETCUP_API_PASSWORD` | Not tested yet | +| [NIFCloud](https://cloud.nifty.com/service/dns.htm) | `nifcloud` | `NIFCLOUD_ACCESS_KEY_ID`, `NIFCLOUD_SECRET_ACCESS_KEY` | Not tested yet | +| [Ns1](https://ns1.com/) | `ns1` | `NS1_API_KEY` | Not tested yet | +| [Open Telekom Cloud](https://cloud.telekom.de) | `otc` | `OTC_DOMAIN_NAME`, `OTC_USER_NAME`, `OTC_PASSWORD`, `OTC_PROJECT_NAME`, `OTC_IDENTITY_ENDPOINT` | Not tested yet | +| [OVH](https://www.ovh.com) | `ovh` | `OVH_ENDPOINT`, `OVH_APPLICATION_KEY`, `OVH_APPLICATION_SECRET`, `OVH_CONSUMER_KEY` | YES | +| [PowerDNS](https://www.powerdns.com) | `pdns` | `PDNS_API_KEY`, `PDNS_API_URL` | Not tested yet | +| [Rackspace](https://www.rackspace.com/cloud/dns) | `rackspace` | `RACKSPACE_USER`, `RACKSPACE_API_KEY` | Not tested yet | +| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | `RFC2136_TSIG_KEY`, `RFC2136_TSIG_SECRET`, `RFC2136_TSIG_ALGORITHM`, `RFC2136_NAMESERVER` | Not tested yet | +| [Route 53](https://aws.amazon.com/route53/) | `route53` | `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, `[AWS_REGION]`, `[AWS_HOSTED_ZONE_ID]` or a configured user/instance IAM profile. 
| YES | +| [Sakura Cloud](https://cloud.sakura.ad.jp/) | `sakuracloud` | `SAKURACLOUD_ACCESS_TOKEN`, `SAKURACLOUD_ACCESS_TOKEN_SECRET` | Not tested yet | +| [Stackpath](https://www.stackpath.com/) | `stackpath` | `STACKPATH_CLIENT_ID`, `STACKPATH_CLIENT_SECRET`, `STACKPATH_STACK_ID` | Not tested yet | +| [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet | +| [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet | ### `domains` diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go new file mode 100644 index 000000000..dd89d9c9d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -0,0 +1,408 @@ +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/dimchansky/utfbom" + "golang.org/x/crypto/pkcs12" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. 
Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + tenantID := os.Getenv("AZURE_TENANT_ID") + clientID := os.Getenv("AZURE_CLIENT_ID") + clientSecret := os.Getenv("AZURE_CLIENT_SECRET") + certificatePath := os.Getenv("AZURE_CERTIFICATE_PATH") + certificatePassword := os.Getenv("AZURE_CERTIFICATE_PASSWORD") + username := os.Getenv("AZURE_USERNAME") + password := os.Getenv("AZURE_PASSWORD") + envName := os.Getenv("AZURE_ENVIRONMENT") + resource := os.Getenv("AZURE_AD_RESOURCE") + + var env azure.Environment + if envName == "" { + env = azure.PublicCloud + } else { + var err error + env, err = azure.EnvironmentFromName(envName) + if err != nil { + return nil, err + } + } + + if resource == "" { + resource = env.ResourceManagerEndpoint + } + + //1.Client Credentials + if clientSecret != "" { + config := NewClientCredentialsConfig(clientID, clientSecret, tenantID) + config.AADEndpoint = env.ActiveDirectoryEndpoint + config.Resource = resource + return config.Authorizer() + } + + //2. Client Certificate + if certificatePath != "" { + config := NewClientCertificateConfig(certificatePath, certificatePassword, clientID, tenantID) + config.AADEndpoint = env.ActiveDirectoryEndpoint + config.Resource = resource + return config.Authorizer() + } + + //3. Username Password + if username != "" && password != "" { + config := NewUsernamePasswordConfig(username, password, clientID, tenantID) + config.AADEndpoint = env.ActiveDirectoryEndpoint + config.Resource = resource + return config.Authorizer() + } + + // 4. MSI + config := NewMSIConfig() + config.Resource = resource + config.ClientID = clientID + return config.Authorizer() +} + +// NewAuthorizerFromFile creates an Authorizer configured from a configuration file. 
+func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return nil, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return nil, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return nil, err + } + + file := file{} + err = json.Unmarshal(decoded, &file) + if err != nil { + return nil, err + } + + resource, err := getResourceForToken(file, baseURI) + if err != nil { + return nil, err + } + + config, err := adal.NewOAuthConfig(file.ActiveDirectoryEndpoint, file.TenantID) + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalToken(*config, file.ClientID, file.ClientSecret, resource) + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// File represents the authentication file +type file struct { + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` + SubscriptionID string `json:"subscriptionId,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"` + ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"` + GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"` + SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"` + GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"` + ManagementEndpoint string `json:"managementEndpointUrl,omitempty"` +} + +func decode(b []byte) ([]byte, error) { + reader, enc := utfbom.Skip(bytes.NewReader(b)) + + switch enc { + case utfbom.UTF16LittleEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.LittleEndian, &u16) + if err != nil { + return nil, err + } + return 
[]byte(string(utf16.Decode(u16))), nil + case utfbom.UTF16BigEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.BigEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + } + return ioutil.ReadAll(reader) +} + +func getResourceForToken(f file, baseURI string) (string, error) { + // Compare dafault base URI from the SDK to the endpoints from the public cloud + // Base URI and token resource are the same string. This func finds the authentication + // file field that matches the SDK base URI. The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return f.ManagementEndpoint, nil + case azure.PublicCloud.ResourceManagerEndpoint: + return f.ResourceManagerEndpoint, nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return f.ActiveDirectoryEndpoint, nil + case azure.PublicCloud.GalleryEndpoint: + return f.GalleryEndpoint, nil + case azure.PublicCloud.GraphEndpoint: + return f.GraphResourceID, nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} + +// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { + return ClientCredentialsConfig{ + ClientID: clientID, + ClientSecret: clientSecret, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. +// Defaults to Public Cloud and Resource Manager Endpoint. 
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { + return ClientCertificateConfig{ + CertificatePath: certificatePath, + CertificatePassword: certificatePassword, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +//AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. 
+type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from client credentials. +func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) + + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. 
+type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + oauthClient := &autorest.Client{} + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.AADEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + + log.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + + spToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from a username and a password. 
+func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + + spToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) + + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. +func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 000000000..648184a12 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,174 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. 
+func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. +func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, 
nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/github.com/edeckers/auroradnsclient/client.go b/vendor/github.com/edeckers/auroradnsclient/client.go deleted file mode 100644 index d366afef3..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/client.go +++ /dev/null @@ -1,22 +0,0 @@ -package auroradnsclient - -import ( - "github.com/edeckers/auroradnsclient/requests" -) - -// AuroraDNSClient is a client for accessing the Aurora DNS API -type AuroraDNSClient struct { - requestor *requests.AuroraRequestor -} - -// NewAuroraDNSClient instantiates a new client -func NewAuroraDNSClient(endpoint string, userID string, key string) (*AuroraDNSClient, error) { - requestor, err := requests.NewAuroraRequestor(endpoint, userID, key) - if err != nil { - return nil, err - } - - return &AuroraDNSClient{ - requestor: requestor, - }, nil -} diff --git 
a/vendor/github.com/edeckers/auroradnsclient/errors.go b/vendor/github.com/edeckers/auroradnsclient/errors.go deleted file mode 100644 index 452718aa4..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package auroradnsclient - -// AuroraDNSError describes the format of a generic AuroraDNS API error -type AuroraDNSError struct { - ErrorCode string `json:"error"` - Message string `json:"errormsg"` -} - -func (e AuroraDNSError) Error() string { - return e.Message -} diff --git a/vendor/github.com/edeckers/auroradnsclient/records.go b/vendor/github.com/edeckers/auroradnsclient/records.go deleted file mode 100644 index e40786e80..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/records.go +++ /dev/null @@ -1,75 +0,0 @@ -package auroradnsclient - -import ( - "encoding/json" - "fmt" - - "github.com/edeckers/auroradnsclient/records" - "github.com/sirupsen/logrus" -) - -// GetRecords returns a list of all records in given zone -func (client *AuroraDNSClient) GetRecords(zoneID string) ([]records.GetRecordsResponse, error) { - logrus.Debugf("GetRecords(%s)", zoneID) - relativeURL := fmt.Sprintf("zones/%s/records", zoneID) - - response, err := client.requestor.Request(relativeURL, "GET", []byte("")) - if err != nil { - logrus.Errorf("Failed to receive records: %s", err) - return nil, err - } - - var respData []records.GetRecordsResponse - err = json.Unmarshal(response, &respData) - if err != nil { - logrus.Errorf("Failed to unmarshall response: %s", err) - return nil, err - } - - return respData, nil -} - -// CreateRecord creates a new record in given zone -func (client *AuroraDNSClient) CreateRecord(zoneID string, data records.CreateRecordRequest) (*records.CreateRecordResponse, error) { - logrus.Debugf("CreateRecord(%s, %+v)", zoneID, data) - body, err := json.Marshal(data) - if err != nil { - logrus.Errorf("Failed to marshall request body: %s", err) - - return nil, err - } - - relativeURL := 
fmt.Sprintf("zones/%s/records", zoneID) - - response, err := client.requestor.Request(relativeURL, "POST", body) - if err != nil { - logrus.Errorf("Failed to create record: %s", err) - - return nil, err - } - - var respData *records.CreateRecordResponse - err = json.Unmarshal(response, &respData) - if err != nil { - logrus.Errorf("Failed to unmarshall response: %s", err) - - return nil, err - } - - return respData, nil -} - -// RemoveRecord removes a record corresponding to a particular id in a given zone -func (client *AuroraDNSClient) RemoveRecord(zoneID string, recordID string) (*records.RemoveRecordResponse, error) { - logrus.Debugf("RemoveRecord(%s, %s)", zoneID, recordID) - relativeURL := fmt.Sprintf("zones/%s/records/%s", zoneID, recordID) - - _, err := client.requestor.Request(relativeURL, "DELETE", nil) - if err != nil { - logrus.Errorf("Failed to remove record: %s", err) - - return nil, err - } - - return &records.RemoveRecordResponse{}, nil -} diff --git a/vendor/github.com/edeckers/auroradnsclient/records/datatypes.go b/vendor/github.com/edeckers/auroradnsclient/records/datatypes.go deleted file mode 100644 index ce7efa7ec..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/records/datatypes.go +++ /dev/null @@ -1,31 +0,0 @@ -package records - -// CreateRecordRequest describes the json payload for creating a record -type CreateRecordRequest struct { - RecordType string `json:"type"` - Name string `json:"name"` - Content string `json:"content"` - TTL int `json:"ttl"` -} - -// CreateRecordResponse describes the json response for creating a record -type CreateRecordResponse struct { - ID string `json:"id"` - RecordType string `json:"type"` - Name string `json:"name"` - Content string `json:"content"` - TTL int `json:"ttl"` -} - -// GetRecordsResponse describes the json response of a single record -type GetRecordsResponse struct { - ID string `json:"id"` - RecordType string `json:"type"` - Name string `json:"name"` - Content string `json:"content"` 
- TTL int `json:"ttl"` -} - -// RemoveRecordResponse describes the json response for removing a record -type RemoveRecordResponse struct { -} diff --git a/vendor/github.com/edeckers/auroradnsclient/requests/errors/errors.go b/vendor/github.com/edeckers/auroradnsclient/requests/errors/errors.go deleted file mode 100644 index e6d7a5649..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/requests/errors/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package errors - -// BadRequest HTTP error wrapper -type BadRequest error - -// Unauthorized HTTP error wrapper -type Unauthorized error - -// Forbidden HTTP error wrapper -type Forbidden error - -// NotFound HTTP error wrapper -type NotFound error - -// ServerError HTTP error wrapper -type ServerError error - -// InvalidStatusCodeError is used when none of the other types applies -type InvalidStatusCodeError error diff --git a/vendor/github.com/edeckers/auroradnsclient/requests/requestor.go b/vendor/github.com/edeckers/auroradnsclient/requests/requestor.go deleted file mode 100644 index b01829c9f..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/requests/requestor.go +++ /dev/null @@ -1,124 +0,0 @@ -package requests - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "net/http" - "net/http/httputil" - "time" - - request_errors "github.com/edeckers/auroradnsclient/requests/errors" - "github.com/edeckers/auroradnsclient/tokens" - "github.com/sirupsen/logrus" -) - -// AuroraRequestor performs actual requests to API -type AuroraRequestor struct { - endpoint string - userID string - key string -} - -// NewAuroraRequestor instantiates a new requestor -func NewAuroraRequestor(endpoint string, userID string, key string) (*AuroraRequestor, error) { - if endpoint == "" { - return nil, fmt.Errorf("Aurora endpoint missing") - } - - if userID == "" || key == "" { - return nil, fmt.Errorf("Aurora credentials missing") - } - - return &AuroraRequestor{endpoint: endpoint, userID: userID, key: key}, nil -} - -func 
(requestor *AuroraRequestor) buildRequest(relativeURL string, method string, body []byte) (*http.Request, error) { - url := fmt.Sprintf("%s/%s", requestor.endpoint, relativeURL) - - request, err := http.NewRequest(method, url, bytes.NewReader(body)) - if err != nil { - logrus.Errorf("Failed to build request: %s", err) - - return request, err - } - - timestamp := time.Now().UTC() - fmtTime := timestamp.Format("20060102T150405Z") - - token := tokens.NewToken(requestor.userID, requestor.key, method, fmt.Sprintf("/%s", relativeURL), timestamp) - - request.Header.Set("X-AuroraDNS-Date", fmtTime) - request.Header.Set("Authorization", fmt.Sprintf("AuroraDNSv1 %s", token)) - - request.Header.Set("Content-Type", "application/json") - - rawRequest, err := httputil.DumpRequestOut(request, true) - if err != nil { - logrus.Errorf("Failed to dump request: %s", err) - } - - logrus.Debugf("Built request:\n%s", rawRequest) - - return request, err -} - -func (requestor *AuroraRequestor) testInvalidResponse(resp *http.Response, response []byte) ([]byte, error) { - if resp.StatusCode < 400 { - return response, nil - } - - logrus.Errorf("Received invalid status code %d:\n%s", resp.StatusCode, response) - - content := errors.New(string(response)) - - statusCodeErrorMap := map[int]error{ - 400: request_errors.BadRequest(content), - 401: request_errors.Unauthorized(content), - 403: request_errors.Forbidden(content), - 404: request_errors.NotFound(content), - 500: request_errors.ServerError(content), - } - - mappedError := statusCodeErrorMap[resp.StatusCode] - - if mappedError == nil { - return nil, request_errors.InvalidStatusCodeError(content) - } - - return nil, mappedError -} - -// Request builds and executues a request to the API -func (requestor *AuroraRequestor) Request(relativeURL string, method string, body []byte) ([]byte, error) { - req, err := requestor.buildRequest(relativeURL, method, body) - - client := http.Client{Timeout: 30 * time.Second} - resp, err := client.Do(req) - 
if err != nil { - logrus.Errorf("Failed request: %s", err) - return nil, err - } - - defer resp.Body.Close() - - rawResponse, err := httputil.DumpResponse(resp, true) - logrus.Debugf("Received raw response:\n%s", rawResponse) - if err != nil { - logrus.Errorf("Failed to dump response: %s", err) - } - - response, err := ioutil.ReadAll(resp.Body) - if err != nil { - logrus.Errorf("Failed to read response: %s", response) - return nil, err - } - - response, err = requestor.testInvalidResponse(resp, response) - if err != nil { - return nil, err - } - - return response, nil -} diff --git a/vendor/github.com/edeckers/auroradnsclient/tokens/generator.go b/vendor/github.com/edeckers/auroradnsclient/tokens/generator.go deleted file mode 100644 index bb47c25b3..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/tokens/generator.go +++ /dev/null @@ -1,35 +0,0 @@ -package tokens - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "fmt" - "strings" - "time" - - "github.com/sirupsen/logrus" -) - -// NewToken generates a token for accessing a specific method of the API -func NewToken(userID string, key string, method string, action string, timestamp time.Time) string { - fmtTime := timestamp.Format("20060102T150405Z") - logrus.Debugf("Built timestamp: %s", fmtTime) - - message := strings.Join([]string{method, action, fmtTime}, "") - logrus.Debugf("Built message: %s", message) - - signatureHmac := hmac.New(sha256.New, []byte(key)) - - signatureHmac.Write([]byte(message)) - - signature := base64.StdEncoding.EncodeToString([]byte(signatureHmac.Sum(nil))) - logrus.Debugf("Built signature: %s", signature) - - userIDAndSignature := fmt.Sprintf("%s:%s", userID, signature) - - token := base64.StdEncoding.EncodeToString([]byte(userIDAndSignature)) - logrus.Debugf("Built token: %s", token) - - return token -} diff --git a/vendor/github.com/edeckers/auroradnsclient/zones.go b/vendor/github.com/edeckers/auroradnsclient/zones.go deleted file mode 100644 index 
49f3ce5ae..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/zones.go +++ /dev/null @@ -1,29 +0,0 @@ -package auroradnsclient - -import ( - "encoding/json" - - "github.com/edeckers/auroradnsclient/zones" - "github.com/sirupsen/logrus" -) - -// GetZones returns a list of all zones -func (client *AuroraDNSClient) GetZones() ([]zones.ZoneRecord, error) { - logrus.Debugf("GetZones") - response, err := client.requestor.Request("zones", "GET", []byte("")) - - if err != nil { - logrus.Errorf("Failed to get zones: %s", err) - return nil, err - } - - var respData []zones.ZoneRecord - err = json.Unmarshal(response, &respData) - if err != nil { - logrus.Errorf("Failed to unmarshall response: %s", err) - return nil, err - } - - logrus.Debugf("Unmarshalled response: %+v", respData) - return respData, nil -} diff --git a/vendor/github.com/edeckers/auroradnsclient/zones/datatypes.go b/vendor/github.com/edeckers/auroradnsclient/zones/datatypes.go deleted file mode 100644 index 16841ec57..000000000 --- a/vendor/github.com/edeckers/auroradnsclient/zones/datatypes.go +++ /dev/null @@ -1,7 +0,0 @@ -package zones - -// ZoneRecord describes the json format for a zone -type ZoneRecord struct { - ID string `json:"id"` - Name string `json:"name"` -} diff --git a/vendor/github.com/edeckers/auroradnsclient/LICENSE b/vendor/github.com/ldez/go-auroradns/LICENSE similarity index 100% rename from vendor/github.com/edeckers/auroradnsclient/LICENSE rename to vendor/github.com/ldez/go-auroradns/LICENSE diff --git a/vendor/github.com/ldez/go-auroradns/auth.go b/vendor/github.com/ldez/go-auroradns/auth.go new file mode 100644 index 000000000..295c34c9b --- /dev/null +++ b/vendor/github.com/ldez/go-auroradns/auth.go @@ -0,0 +1,98 @@ +package auroradns + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "strings" + "time" +) + +// TokenTransport HTTP transport for API authentication +type TokenTransport struct { + userID string + key string + + // Transport 
is the underlying HTTP transport to use when making requests. + // It will default to http.DefaultTransport if nil. + Transport http.RoundTripper +} + +// NewTokenTransport Creates a new TokenTransport +func NewTokenTransport(userID, key string) (*TokenTransport, error) { + if userID == "" || key == "" { + return nil, fmt.Errorf("credentials missing") + } + + return &TokenTransport{userID: userID, key: key}, nil +} + +// RoundTrip executes a single HTTP transaction +func (t *TokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { + enrichedReq := &http.Request{} + *enrichedReq = *req + + enrichedReq.Header = make(http.Header, len(req.Header)) + for k, s := range req.Header { + enrichedReq.Header[k] = append([]string(nil), s...) + } + + if t.userID != "" && t.key != "" { + timestamp := time.Now().UTC() + + fmtTime := timestamp.Format("20060102T150405Z") + req.Header.Set("X-AuroraDNS-Date", fmtTime) + + token, err := newToken(t.userID, t.key, req.Method, req.URL.Path, timestamp) + if err == nil { + req.Header.Set("Authorization", fmt.Sprintf("AuroraDNSv1 %s", token)) + } + } + + return t.transport().RoundTrip(enrichedReq) +} + +// Wrap Wrap a HTTP client Transport with the TokenTransport +func (t *TokenTransport) Wrap(client *http.Client) *http.Client { + backup := client.Transport + t.Transport = backup + client.Transport = t + return client +} + +// Client Creates a new HTTP client +func (t *TokenTransport) Client() *http.Client { + return &http.Client{ + Transport: t, + Timeout: 30 * time.Second, + } +} + +func (t *TokenTransport) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + return http.DefaultTransport +} + +// newToken generates a token for accessing a specific method of the API +func newToken(userID string, key string, method string, action string, timestamp time.Time) (string, error) { + fmtTime := timestamp.Format("20060102T150405Z") + message := strings.Join([]string{method, action, fmtTime}, "") + + 
signatureHmac := hmac.New(sha256.New, []byte(key)) + _, err := signatureHmac.Write([]byte(message)) + if err != nil { + return "", err + } + + signature := base64.StdEncoding.EncodeToString(signatureHmac.Sum(nil)) + + userIDAndSignature := fmt.Sprintf("%s:%s", userID, signature) + + token := base64.StdEncoding.EncodeToString([]byte(userIDAndSignature)) + + return token, nil +} diff --git a/vendor/github.com/ldez/go-auroradns/client.go b/vendor/github.com/ldez/go-auroradns/client.go new file mode 100644 index 000000000..4387b1a73 --- /dev/null +++ b/vendor/github.com/ldez/go-auroradns/client.go @@ -0,0 +1,144 @@ +package auroradns + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" +) + +const defaultBaseURL = "https://api.auroradns.eu" + +const ( + contentTypeHeader = "Content-Type" + contentTypeJSON = "application/json" +) + +// ErrorResponse A representation of an API error message. +type ErrorResponse struct { + ErrorCode string `json:"error"` + Message string `json:"errormsg"` +} + +func (e *ErrorResponse) Error() string { + return fmt.Sprintf("%s - %s", e.ErrorCode, e.Message) +} + +// Option Type of a client option +type Option func(*Client) error + +// Client The API client +type Client struct { + baseURL *url.URL + UserAgent string + httpClient *http.Client +} + +// NewClient Creates a new client +func NewClient(httpClient *http.Client, opts ...Option) (*Client, error) { + if httpClient == nil { + httpClient = http.DefaultClient + } + + baseURL, _ := url.Parse(defaultBaseURL) + + client := &Client{ + baseURL: baseURL, + httpClient: httpClient, + } + + for _, opt := range opts { + err := opt(client) + if err != nil { + return nil, err + } + } + + return client, nil +} + +func (c *Client) newRequest(method, resource string, body io.Reader) (*http.Request, error) { + u, err := c.baseURL.Parse(resource) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(method, u.String(), body) + if err != nil { + return 
nil, err + } + + req.Header.Set(contentTypeHeader, contentTypeJSON) + + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } + + return req, nil +} + +func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) { + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer func() { _ = resp.Body.Close() }() + + if err = checkResponse(resp); err != nil { + return resp, err + } + + if v == nil { + return resp, nil + } + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + return resp, fmt.Errorf("failed to read body: %v", err) + } + + if err = json.Unmarshal(raw, v); err != nil { + return resp, fmt.Errorf("unmarshaling %T error: %v: %s", err, v, string(raw)) + } + + return resp, nil +} + +func checkResponse(resp *http.Response) error { + if c := resp.StatusCode; 200 <= c && c <= 299 { + return nil + } + + data, err := ioutil.ReadAll(resp.Body) + if err == nil && data != nil { + errorResponse := new(ErrorResponse) + err = json.Unmarshal(data, errorResponse) + if err != nil { + return fmt.Errorf("unmarshaling ErrorResponse error: %v: %s", err.Error(), string(data)) + } + + return errorResponse + } + defer func() { _ = resp.Body.Close() }() + + return nil +} + +// WithBaseURL Allows to define a custom base URL +func WithBaseURL(rawBaseURL string) func(*Client) error { + return func(client *Client) error { + if len(rawBaseURL) == 0 { + return nil + } + + baseURL, err := url.Parse(rawBaseURL) + if err != nil { + return err + } + + client.baseURL = baseURL + return nil + } +} diff --git a/vendor/github.com/ldez/go-auroradns/records.go b/vendor/github.com/ldez/go-auroradns/records.go new file mode 100644 index 000000000..a1cf08717 --- /dev/null +++ b/vendor/github.com/ldez/go-auroradns/records.go @@ -0,0 +1,91 @@ +package auroradns + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" +) + +// Record types +const ( + RecordTypeA = "A" + RecordTypeAAAA = "AAAA" + RecordTypeCNAME = "CNAME" + RecordTypeMX 
= "MX" + RecordTypeNS = "NS" + RecordTypeSOA = "SOA" + RecordTypeSRV = "SRV" + RecordTypeTXT = "TXT" + RecordTypeDS = "DS" + RecordTypePTR = "PTR" + RecordTypeSSHFP = "SSHFP" + RecordTypeTLSA = "TLS" +) + +// Record a DNS record +type Record struct { + ID string `json:"id,omitempty"` + RecordType string `json:"type"` + Name string `json:"name"` + Content string `json:"content"` + TTL int `json:"ttl,omitempty"` +} + +// CreateRecord Creates a new record. +func (c *Client) CreateRecord(zoneID string, record Record) (*Record, *http.Response, error) { + body, err := json.Marshal(record) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshall request body: %v", err) + } + + resource := fmt.Sprintf("/zones/%s/records", zoneID) + + req, err := c.newRequest(http.MethodPost, resource, bytes.NewReader(body)) + if err != nil { + return nil, nil, err + } + + newRecord := new(Record) + resp, err := c.do(req, newRecord) + if err != nil { + return nil, resp, err + } + + return newRecord, resp, nil +} + +// DeleteRecord Delete a record. 
+func (c *Client) DeleteRecord(zoneID string, recordID string) (bool, *http.Response, error) { + resource := fmt.Sprintf("/zones/%s/records/%s", zoneID, recordID) + + req, err := c.newRequest(http.MethodDelete, resource, nil) + if err != nil { + return false, nil, err + } + + resp, err := c.do(req, nil) + if err != nil { + return false, resp, err + } + + return true, resp, nil +} + +// ListRecords returns a list of all records in given zone +func (c *Client) ListRecords(zoneID string) ([]Record, *http.Response, error) { + resource := fmt.Sprintf("/zones/%s/records", zoneID) + + req, err := c.newRequest(http.MethodGet, resource, nil) + if err != nil { + return nil, nil, err + } + + var records []Record + resp, err := c.do(req, &records) + if err != nil { + return nil, resp, err + } + + return records, resp, nil +} diff --git a/vendor/github.com/ldez/go-auroradns/zones.go b/vendor/github.com/ldez/go-auroradns/zones.go new file mode 100644 index 000000000..8e372188f --- /dev/null +++ b/vendor/github.com/ldez/go-auroradns/zones.go @@ -0,0 +1,69 @@ +package auroradns + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" +) + +// Zone a DNS zone +type Zone struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` +} + +// CreateZone Creates a zone. +func (c *Client) CreateZone(domain string) (*Zone, *http.Response, error) { + body, err := json.Marshal(Zone{Name: domain}) + if err != nil { + return nil, nil, fmt.Errorf("failed to marshall request body: %v", err) + } + + req, err := c.newRequest(http.MethodPost, "/zones", bytes.NewReader(body)) + if err != nil { + return nil, nil, err + } + + zone := new(Zone) + resp, err := c.do(req, zone) + if err != nil { + return nil, resp, err + } + + return zone, resp, nil +} + +// DeleteZone Delete a zone. 
+func (c *Client) DeleteZone(zoneID string) (bool, *http.Response, error) { + resource := fmt.Sprintf("/zones/%s", zoneID) + + req, err := c.newRequest(http.MethodDelete, resource, nil) + if err != nil { + return false, nil, err + } + + resp, err := c.do(req, nil) + if err != nil { + return false, resp, err + } + + return true, resp, nil + +} + +// ListZones returns a list of all zones. +func (c *Client) ListZones() ([]Zone, *http.Response, error) { + req, err := c.newRequest(http.MethodGet, "/zones", nil) + if err != nil { + return nil, nil, err + } + + var zones []Zone + resp, err := c.do(req, &zones) + if err != nil { + return nil, resp, err + } + + return zones, resp, nil +} diff --git a/vendor/github.com/xenolf/lego/acme/dns_challenge.go b/vendor/github.com/xenolf/lego/acme/dns_challenge.go index f803d0a8c..d9c252e7e 100644 --- a/vendor/github.com/xenolf/lego/acme/dns_challenge.go +++ b/vendor/github.com/xenolf/lego/acme/dns_challenge.go @@ -7,6 +7,7 @@ import ( "fmt" "net" "strings" + "sync" "time" "github.com/miekg/dns" @@ -18,8 +19,9 @@ type preCheckDNSFunc func(fqdn, value string) (bool, error) var ( // PreCheckDNS checks DNS propagation before notifying ACME that // the DNS challenge is ready. - PreCheckDNS preCheckDNSFunc = checkDNSPropagation - fqdnToZone = map[string]string{} + PreCheckDNS preCheckDNSFunc = checkDNSPropagation + fqdnToZone = map[string]string{} + muFqdnToZone sync.Mutex ) const defaultResolvConf = "/etc/resolv.conf" @@ -262,6 +264,9 @@ func lookupNameservers(fqdn string) ([]string, error) { // FindZoneByFqdn determines the zone apex for the given fqdn by recursing up the // domain labels until the nameserver returns a SOA record in the answer section. func FindZoneByFqdn(fqdn string, nameservers []string) (string, error) { + muFqdnToZone.Lock() + defer muFqdnToZone.Unlock() + // Do we have it cached? 
if zone, ok := fqdnToZone[fqdn]; ok { return zone, nil diff --git a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go b/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go index d3cb76da9..21e455c90 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go +++ b/vendor/github.com/xenolf/lego/providers/dns/auroradns/auroradns.go @@ -1,3 +1,4 @@ +// Package auroradns implements a DNS provider for solving the DNS-01 challenge using Aurora DNS. package auroradns import ( @@ -6,9 +7,7 @@ import ( "sync" "time" - "github.com/edeckers/auroradnsclient" - "github.com/edeckers/auroradnsclient/records" - "github.com/edeckers/auroradnsclient/zones" + "github.com/ldez/go-auroradns" "github.com/xenolf/lego/acme" "github.com/xenolf/lego/platform/config/env" ) @@ -39,7 +38,7 @@ type DNSProvider struct { recordIDs map[string]string recordIDsMu sync.Mutex config *Config - client *auroradnsclient.AuroraDNSClient + client *auroradns.Client } // NewDNSProvider returns a DNSProvider instance configured for AuroraDNS. 
@@ -85,7 +84,12 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { config.BaseURL = defaultBaseURL } - client, err := auroradnsclient.NewAuroraDNSClient(config.BaseURL, config.UserID, config.Key) + tr, err := auroradns.NewTokenTransport(config.UserID, config.Key) + if err != nil { + return nil, fmt.Errorf("aurora: %v", err) + } + + client, err := auroradns.NewClient(tr.Client(), auroradns.WithBaseURL(config.BaseURL)) if err != nil { return nil, fmt.Errorf("aurora: %v", err) } @@ -117,26 +121,25 @@ func (d *DNSProvider) Present(domain, token, keyAuth string) error { authZone = acme.UnFqdn(authZone) - zoneRecord, err := d.getZoneInformationByName(authZone) + zone, err := d.getZoneInformationByName(authZone) if err != nil { return fmt.Errorf("aurora: could not create record: %v", err) } - reqData := - records.CreateRecordRequest{ - RecordType: "TXT", - Name: subdomain, - Content: value, - TTL: d.config.TTL, - } + record := auroradns.Record{ + RecordType: "TXT", + Name: subdomain, + Content: value, + TTL: d.config.TTL, + } - respData, err := d.client.CreateRecord(zoneRecord.ID, reqData) + newRecord, _, err := d.client.CreateRecord(zone.ID, record) if err != nil { return fmt.Errorf("aurora: could not create record: %v", err) } d.recordIDsMu.Lock() - d.recordIDs[fqdn] = respData.ID + d.recordIDs[fqdn] = newRecord.ID d.recordIDsMu.Unlock() return nil @@ -161,12 +164,12 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { authZone = acme.UnFqdn(authZone) - zoneRecord, err := d.getZoneInformationByName(authZone) + zone, err := d.getZoneInformationByName(authZone) if err != nil { return err } - _, err = d.client.RemoveRecord(zoneRecord.ID, recordID) + _, _, err = d.client.DeleteRecord(zone.ID, recordID) if err != nil { return err } @@ -184,10 +187,10 @@ func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { return d.config.PropagationTimeout, d.config.PollingInterval } -func (d *DNSProvider) getZoneInformationByName(name 
string) (zones.ZoneRecord, error) { - zs, err := d.client.GetZones() +func (d *DNSProvider) getZoneInformationByName(name string) (auroradns.Zone, error) { + zs, _, err := d.client.ListZones() if err != nil { - return zones.ZoneRecord{}, err + return auroradns.Zone{}, err } for _, element := range zs { @@ -196,5 +199,5 @@ func (d *DNSProvider) getZoneInformationByName(name string) (zones.ZoneRecord, e } } - return zones.ZoneRecord{}, fmt.Errorf("could not find Zone record") + return auroradns.Zone{}, fmt.Errorf("could not find Zone record") } diff --git a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go index d04c3d668..74a4e531f 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go +++ b/vendor/github.com/xenolf/lego/providers/dns/azure/azure.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "io/ioutil" "net/http" "strings" "time" @@ -15,18 +16,26 @@ import ( "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/to" "github.com/xenolf/lego/acme" "github.com/xenolf/lego/platform/config/env" ) +const defaultMetadataEndpoint = "http://169.254.169.254" + // Config is used to configure the creation of the DNSProvider type Config struct { - ClientID string - ClientSecret string - SubscriptionID string - TenantID string - ResourceGroup string + // optional if using instance metadata service + ClientID string + ClientSecret string + TenantID string + + SubscriptionID string + ResourceGroup string + + MetadataEndpoint string + PropagationTimeout time.Duration PollingInterval time.Duration TTL int @@ -39,29 +48,26 @@ func NewDefaultConfig() *Config { TTL: env.GetOrDefaultInt("AZURE_TTL", 60), PropagationTimeout: env.GetOrDefaultSecond("AZURE_PROPAGATION_TIMEOUT", 2*time.Minute), PollingInterval: 
env.GetOrDefaultSecond("AZURE_POLLING_INTERVAL", 2*time.Second), + MetadataEndpoint: env.GetOrFile("AZURE_METADATA_ENDPOINT"), } } // DNSProvider is an implementation of the acme.ChallengeProvider interface type DNSProvider struct { - config *Config + config *Config + authorizer autorest.Authorizer } // NewDNSProvider returns a DNSProvider instance configured for azure. -// Credentials must be passed in the environment variables: AZURE_CLIENT_ID, -// AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP +// Credentials can be passed in the environment variables: +// AZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP +// If the credentials are _not_ set via the environment, +// then it will attempt to get a bearer token via the instance metadata service. +// see: https://github.com/Azure/go-autorest/blob/v10.14.0/autorest/azure/auth/auth.go#L38-L42 func NewDNSProvider() (*DNSProvider, error) { - values, err := env.Get("AZURE_CLIENT_ID", "AZURE_CLIENT_SECRET", "AZURE_SUBSCRIPTION_ID", "AZURE_TENANT_ID", "AZURE_RESOURCE_GROUP") - if err != nil { - return nil, fmt.Errorf("azure: %v", err) - } - config := NewDefaultConfig() - config.ClientID = values["AZURE_CLIENT_ID"] - config.ClientSecret = values["AZURE_CLIENT_SECRET"] - config.SubscriptionID = values["AZURE_SUBSCRIPTION_ID"] - config.TenantID = values["AZURE_TENANT_ID"] - config.ResourceGroup = values["AZURE_RESOURCE_GROUP"] + config.SubscriptionID = env.GetOrFile("AZURE_SUBSCRIPTION_ID") + config.ResourceGroup = env.GetOrFile("AZURE_RESOURCE_GROUP") return NewDNSProviderConfig(config) } @@ -73,8 +79,8 @@ func NewDNSProviderCredentials(clientID, clientSecret, subscriptionID, tenantID, config := NewDefaultConfig() config.ClientID = clientID config.ClientSecret = clientSecret - config.SubscriptionID = subscriptionID config.TenantID = tenantID + config.SubscriptionID = subscriptionID config.ResourceGroup = resourceGroup return 
NewDNSProviderConfig(config) @@ -86,11 +92,40 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { return nil, errors.New("azure: the configuration of the DNS provider is nil") } - if config.ClientID == "" || config.ClientSecret == "" || config.SubscriptionID == "" || config.TenantID == "" || config.ResourceGroup == "" { - return nil, errors.New("azure: some credentials information are missing") + if config.HTTPClient == nil { + config.HTTPClient = http.DefaultClient } - return &DNSProvider{config: config}, nil + authorizer, err := getAuthorizer(config) + if err != nil { + return nil, err + } + + if config.SubscriptionID == "" { + subsID, err := getMetadata(config, "subscriptionId") + if err != nil { + return nil, fmt.Errorf("azure: %v", err) + } + + if subsID == "" { + return nil, errors.New("azure: SubscriptionID is missing") + } + config.SubscriptionID = subsID + } + + if config.ResourceGroup == "" { + resGroup, err := getMetadata(config, "resourceGroupName") + if err != nil { + return nil, fmt.Errorf("azure: %v", err) + } + + if resGroup == "" { + return nil, errors.New("azure: ResourceGroup is missing") + } + config.ResourceGroup = resGroup + } + + return &DNSProvider{config: config, authorizer: authorizer}, nil } // Timeout returns the timeout and interval to use when checking for DNS @@ -110,12 +145,7 @@ func (d *DNSProvider) Present(domain, token, keyAuth string) error { } rsc := dns.NewRecordSetsClient(d.config.SubscriptionID) - spt, err := d.newServicePrincipalToken(azure.PublicCloud.ResourceManagerEndpoint) - if err != nil { - return fmt.Errorf("azure: %v", err) - } - - rsc.Authorizer = autorest.NewBearerAuthorizer(spt) + rsc.Authorizer = d.authorizer relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) rec := dns.RecordSet{ @@ -145,12 +175,7 @@ func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error { relative := toRelativeRecord(fqdn, acme.ToFqdn(zone)) rsc := dns.NewRecordSetsClient(d.config.SubscriptionID) - spt, err := 
d.newServicePrincipalToken(azure.PublicCloud.ResourceManagerEndpoint) - if err != nil { - return fmt.Errorf("azure: %v", err) - } - - rsc.Authorizer = autorest.NewBearerAuthorizer(spt) + rsc.Authorizer = d.authorizer _, err = rsc.Delete(ctx, d.config.ResourceGroup, zone, relative, dns.TXT, "") if err != nil { @@ -166,14 +191,8 @@ func (d *DNSProvider) getHostedZoneID(ctx context.Context, fqdn string) (string, return "", err } - // Now we want to to Azure and get the zone. - spt, err := d.newServicePrincipalToken(azure.PublicCloud.ResourceManagerEndpoint) - if err != nil { - return "", err - } - dc := dns.NewZonesClient(d.config.SubscriptionID) - dc.Authorizer = autorest.NewBearerAuthorizer(spt) + dc.Authorizer = d.authorizer zone, err := dc.Get(ctx, d.config.ResourceGroup, acme.UnFqdn(authZone)) if err != nil { @@ -184,17 +203,61 @@ func (d *DNSProvider) getHostedZoneID(ctx context.Context, fqdn string) (string, return to.String(zone.Name), nil } -// NewServicePrincipalTokenFromCredentials creates a new ServicePrincipalToken using values of the -// passed credentials map. 
-func (d *DNSProvider) newServicePrincipalToken(scope string) (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, d.config.TenantID) - if err != nil { - return nil, err - } - return adal.NewServicePrincipalToken(*oauthConfig, d.config.ClientID, d.config.ClientSecret, scope) -} - // Returns the relative record to the domain func toRelativeRecord(domain, zone string) string { return acme.UnFqdn(strings.TrimSuffix(domain, zone)) } + +func getAuthorizer(config *Config) (autorest.Authorizer, error) { + if config.ClientID != "" && config.ClientSecret != "" && config.TenantID != "" { + oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, config.TenantID) + if err != nil { + return nil, err + } + + spt, err := adal.NewServicePrincipalToken(*oauthConfig, config.ClientID, config.ClientSecret, azure.PublicCloud.ResourceManagerEndpoint) + if err != nil { + return nil, err + } + + spt.SetSender(config.HTTPClient) + return autorest.NewBearerAuthorizer(spt), nil + } + + return auth.NewAuthorizerFromEnvironment() +} + +// Fetches metadata from environment or he instance metadata service +// borrowed from https://github.com/Microsoft/azureimds/blob/master/imdssample.go +func getMetadata(config *Config, field string) (string, error) { + metadataEndpoint := config.MetadataEndpoint + if len(metadataEndpoint) == 0 { + metadataEndpoint = defaultMetadataEndpoint + } + + resource := fmt.Sprintf("%s/metadata/instance/compute/%s", metadataEndpoint, field) + req, err := http.NewRequest(http.MethodGet, resource, nil) + if err != nil { + return "", err + } + + req.Header.Add("Metadata", "True") + + q := req.URL.Query() + q.Add("format", "text") + q.Add("api-version", "2017-12-01") + req.URL.RawQuery = q.Encode() + + resp, err := config.HTTPClient.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 
"", err + } + + return string(respBody[:]), nil +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/client.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/client.go index ba0727b9e..95f2dda05 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/client.go +++ b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/client.go @@ -7,6 +7,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "io/ioutil" "net/http" "time" ) @@ -27,6 +28,10 @@ type Record struct { SourceID int `json:"sourceId"` } +type recordsResponse struct { + Records *[]Record `json:"data"` +} + // Client DNSMadeEasy client type Client struct { apiKey string @@ -82,10 +87,6 @@ func (c *Client) GetRecords(domain *Domain, recordName, recordType string) (*[]R } defer resp.Body.Close() - type recordsResponse struct { - Records *[]Record `json:"data"` - } - records := &recordsResponse{} err = json.NewDecoder(resp.Body).Decode(&records) if err != nil { @@ -151,7 +152,11 @@ func (c *Client) sendRequest(method, resource string, payload interface{}) (*htt } if resp.StatusCode > 299 { - return nil, fmt.Errorf("DNSMadeEasy API request failed with HTTP status code %d", resp.StatusCode) + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("request failed with HTTP status code %d", resp.StatusCode) + } + return nil, fmt.Errorf("request failed with HTTP status code %d: %s", resp.StatusCode, string(body)) } return resp, nil diff --git a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go index 519a44519..a55ea3978 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go +++ b/vendor/github.com/xenolf/lego/providers/dns/dnsmadeeasy/dnsmadeeasy.go @@ -1,3 +1,4 @@ +// Package dnsmadeeasy implements a DNS provider for solving the DNS-01 challenge using DNS Made Easy. 
package dnsmadeeasy import ( @@ -112,13 +113,13 @@ func (d *DNSProvider) Present(domainName, token, keyAuth string) error { authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) if err != nil { - return err + return fmt.Errorf("dnsmadeeasy: unable to find zone for %s: %v", fqdn, err) } // fetch the domain details domain, err := d.client.GetDomain(authZone) if err != nil { - return err + return fmt.Errorf("dnsmadeeasy: unable to get domain for zone %s: %v", authZone, err) } // create the TXT record @@ -126,7 +127,10 @@ func (d *DNSProvider) Present(domainName, token, keyAuth string) error { record := &Record{Type: "TXT", Name: name, Value: value, TTL: d.config.TTL} err = d.client.CreateRecord(domain, record) - return err + if err != nil { + return fmt.Errorf("dnsmadeeasy: unable to create record for %s: %v", name, err) + } + return nil } // CleanUp removes the TXT records matching the specified parameters @@ -135,31 +139,32 @@ func (d *DNSProvider) CleanUp(domainName, token, keyAuth string) error { authZone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) if err != nil { - return err + return fmt.Errorf("dnsmadeeasy: unable to find zone for %s: %v", fqdn, err) } // fetch the domain details domain, err := d.client.GetDomain(authZone) if err != nil { - return err + return fmt.Errorf("dnsmadeeasy: unable to get domain for zone %s: %v", authZone, err) } // find matching records name := strings.Replace(fqdn, "."+authZone, "", 1) records, err := d.client.GetRecords(domain, name, "TXT") if err != nil { - return err + return fmt.Errorf("dnsmadeeasy: unable to get records for domain %s: %v", domain.Name, err) } // delete records + var lastError error for _, record := range *records { err = d.client.DeleteRecord(record) if err != nil { - return err + lastError = fmt.Errorf("dnsmadeeasy: unable to delete record [id=%d, name=%s]: %v", record.ID, record.Name, err) } } - return nil + return lastError } // Timeout returns the timeout and interval to use 
when checking for DNS propagation. diff --git a/vendor/github.com/xenolf/lego/providers/dns/dreamhost/dreamhost.go b/vendor/github.com/xenolf/lego/providers/dns/dreamhost/dreamhost.go index cc0e36338..9edd27b72 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/dreamhost/dreamhost.go +++ b/vendor/github.com/xenolf/lego/providers/dns/dreamhost/dreamhost.go @@ -1,4 +1,4 @@ -// Package dreamhost Adds lego support for http://dreamhost.com DNS updates +// Package dreamhost implements a DNS provider for solving the DNS-01 challenge using DreamHost. // See https://help.dreamhost.com/hc/en-us/articles/217560167-API_overview // and https://help.dreamhost.com/hc/en-us/articles/217555707-DNS-API-commands for the API spec. package dreamhost diff --git a/vendor/github.com/xenolf/lego/providers/dns/duckdns/duckdns.go b/vendor/github.com/xenolf/lego/providers/dns/duckdns/duckdns.go index 6bbf76352..7581af173 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/duckdns/duckdns.go +++ b/vendor/github.com/xenolf/lego/providers/dns/duckdns/duckdns.go @@ -1,4 +1,4 @@ -// Package duckdns Adds lego support for http://duckdns.org. +// Package duckdns implements a DNS provider for solving the DNS-01 challenge using DuckDNS. // See http://www.duckdns.org/spec.jsp for more info on updating TXT records. package duckdns @@ -7,8 +7,12 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" + "strconv" + "strings" "time" + "github.com/miekg/dns" "github.com/xenolf/lego/acme" "github.com/xenolf/lego/platform/config/env" ) @@ -96,9 +100,16 @@ func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { // To update the TXT record we just need to make one simple get request. // In DuckDNS you only have one TXT record shared with the domain and all sub domains. 
func updateTxtRecord(domain, token, txt string, clear bool) error { - u := fmt.Sprintf("https://www.duckdns.org/update?domains=%s&token=%s&clear=%t&txt=%s", domain, token, clear, txt) + u, _ := url.Parse("https://www.duckdns.org/update") - response, err := acme.HTTPClient.Get(u) + query := u.Query() + query.Set("domains", getMainDomain(domain)) + query.Set("token", token) + query.Set("clear", strconv.FormatBool(clear)) + query.Set("txt", txt) + u.RawQuery = query.Encode() + + response, err := acme.HTTPClient.Get(u.String()) if err != nil { return err } @@ -115,3 +126,23 @@ func updateTxtRecord(domain, token, txt string, clear bool) error { } return nil } + +// DuckDNS only lets you write to your subdomain +// so it must be in format subdomain.duckdns.org +// not in format subsubdomain.subdomain.duckdns.org +// so strip off everything that is not top 3 levels +func getMainDomain(domain string) string { + domain = acme.UnFqdn(domain) + + split := dns.Split(domain) + if strings.HasSuffix(strings.ToLower(domain), "duckdns.org") { + if len(split) < 3 { + return "" + } + + firstSubDomainIndex := split[len(split)-3] + return domain[firstSubDomainIndex:] + } + + return domain[split[len(split)-1]:] +} diff --git a/vendor/github.com/xenolf/lego/providers/dns/netcup/client.go b/vendor/github.com/xenolf/lego/providers/dns/netcup/client.go index f30bd7f12..17a2ae968 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/netcup/client.go +++ b/vendor/github.com/xenolf/lego/providers/dns/netcup/client.go @@ -25,27 +25,27 @@ type Request struct { Param interface{} `json:"param"` } -// LoginMsg as specified in netcup WSDL +// LoginRequest as specified in netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php#login -type LoginMsg struct { +type LoginRequest struct { CustomerNumber string `json:"customernumber"` APIKey string `json:"apikey"` APIPassword string `json:"apipassword"` ClientRequestID string `json:"clientrequestid,omitempty"` } -// LogoutMsg as 
specified in netcup WSDL +// LogoutRequest as specified in netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php#logout -type LogoutMsg struct { +type LogoutRequest struct { CustomerNumber string `json:"customernumber"` APIKey string `json:"apikey"` APISessionID string `json:"apisessionid"` ClientRequestID string `json:"clientrequestid,omitempty"` } -// UpdateDNSRecordsMsg as specified in netcup WSDL +// UpdateDNSRecordsRequest as specified in netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php#updateDnsRecords -type UpdateDNSRecordsMsg struct { +type UpdateDNSRecordsRequest struct { DomainName string `json:"domainname"` CustomerNumber string `json:"customernumber"` APIKey string `json:"apikey"` @@ -55,15 +55,15 @@ type UpdateDNSRecordsMsg struct { } // DNSRecordSet as specified in netcup WSDL -// needed in UpdateDNSRecordsMsg +// needed in UpdateDNSRecordsRequest // https://ccp.netcup.net/run/webservice/servers/endpoint.php#Dnsrecordset type DNSRecordSet struct { DNSRecords []DNSRecord `json:"dnsrecords"` } -// InfoDNSRecordsMsg as specified in netcup WSDL +// InfoDNSRecordsRequest as specified in netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php#infoDnsRecords -type InfoDNSRecordsMsg struct { +type InfoDNSRecordsRequest struct { DomainName string `json:"domainname"` CustomerNumber string `json:"customernumber"` APIKey string `json:"apikey"` @@ -87,33 +87,30 @@ type DNSRecord struct { // ResponseMsg as specified in netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php#Responsemessage type ResponseMsg struct { - ServerRequestID string `json:"serverrequestid"` - ClientRequestID string `json:"clientrequestid,omitempty"` - Action string `json:"action"` - Status string `json:"status"` - StatusCode int `json:"statuscode"` - ShortMessage string `json:"shortmessage"` - LongMessage string `json:"longmessage"` - ResponseData ResponseData `json:"responsedata,omitempty"` + ServerRequestID 
string `json:"serverrequestid"` + ClientRequestID string `json:"clientrequestid,omitempty"` + Action string `json:"action"` + Status string `json:"status"` + StatusCode int `json:"statuscode"` + ShortMessage string `json:"shortmessage"` + LongMessage string `json:"longmessage"` + ResponseData json.RawMessage `json:"responsedata,omitempty"` } -// LogoutResponseMsg similar to ResponseMsg -// allows empty ResponseData field whilst unmarshaling -type LogoutResponseMsg struct { - ServerRequestID string `json:"serverrequestid"` - ClientRequestID string `json:"clientrequestid,omitempty"` - Action string `json:"action"` - Status string `json:"status"` - StatusCode int `json:"statuscode"` - ShortMessage string `json:"shortmessage"` - LongMessage string `json:"longmessage"` - ResponseData string `json:"responsedata,omitempty"` +func (r *ResponseMsg) Error() string { + return fmt.Sprintf("an error occurred during the action %s: [Status=%s, StatusCode=%d, ShortMessage=%s, LongMessage=%s]", + r.Action, r.Status, r.StatusCode, r.ShortMessage, r.LongMessage) } -// ResponseData to enable correct unmarshaling of ResponseMsg -type ResponseData struct { +// LoginResponse response to login action. +type LoginResponse struct { + APISessionID string `json:"apisessionid"` +} + +// InfoDNSRecordsResponse response to infoDnsRecords action. 
+type InfoDNSRecordsResponse struct { APISessionID string `json:"apisessionid"` - DNSRecords []DNSRecord `json:"dnsrecords"` + DNSRecords []DNSRecord `json:"dnsrecords,omitempty"` } // Client netcup DNS client @@ -126,7 +123,11 @@ type Client struct { } // NewClient creates a netcup DNS client -func NewClient(customerNumber string, apiKey string, apiPassword string) *Client { +func NewClient(customerNumber string, apiKey string, apiPassword string) (*Client, error) { + if customerNumber == "" || apiKey == "" || apiPassword == "" { + return nil, fmt.Errorf("credentials missing") + } + return &Client{ customerNumber: customerNumber, apiKey: apiKey, @@ -135,7 +136,7 @@ func NewClient(customerNumber string, apiKey string, apiPassword string) *Client HTTPClient: &http.Client{ Timeout: 10 * time.Second, }, - } + }, nil } // Login performs the login as specified by the netcup WSDL @@ -144,7 +145,7 @@ func NewClient(customerNumber string, apiKey string, apiPassword string) *Client func (c *Client) Login() (string, error) { payload := &Request{ Action: "login", - Param: &LoginMsg{ + Param: &LoginRequest{ CustomerNumber: c.customerNumber, APIKey: c.apiKey, APIPassword: c.apiPassword, @@ -152,21 +153,13 @@ func (c *Client) Login() (string, error) { }, } - response, err := c.sendRequest(payload) + var responseData LoginResponse + err := c.doRequest(payload, &responseData) if err != nil { - return "", fmt.Errorf("error sending request to DNS-API, %v", err) + return "", fmt.Errorf("loging error: %v", err) } - var r ResponseMsg - - err = json.Unmarshal(response, &r) - if err != nil { - return "", fmt.Errorf("error decoding response of DNS-API, %v", err) - } - if r.Status != success { - return "", fmt.Errorf("error logging into DNS-API, %v", r.LongMessage) - } - return r.ResponseData.APISessionID, nil + return responseData.APISessionID, nil } // Logout performs the logout with the supplied sessionID as specified by the netcup WSDL @@ -174,7 +167,7 @@ func (c *Client) Login() 
(string, error) { func (c *Client) Logout(sessionID string) error { payload := &Request{ Action: "logout", - Param: &LogoutMsg{ + Param: &LogoutRequest{ CustomerNumber: c.customerNumber, APIKey: c.apiKey, APISessionID: sessionID, @@ -182,54 +175,34 @@ func (c *Client) Logout(sessionID string) error { }, } - response, err := c.sendRequest(payload) + err := c.doRequest(payload, nil) if err != nil { - return fmt.Errorf("error logging out of DNS-API: %v", err) + return fmt.Errorf("logout error: %v", err) } - var r LogoutResponseMsg - - err = json.Unmarshal(response, &r) - if err != nil { - return fmt.Errorf("error logging out of DNS-API: %v", err) - } - - if r.Status != success { - return fmt.Errorf("error logging out of DNS-API: %v", r.ShortMessage) - } return nil } // UpdateDNSRecord performs an update of the DNSRecords as specified by the netcup WSDL // https://ccp.netcup.net/run/webservice/servers/endpoint.php -func (c *Client) UpdateDNSRecord(sessionID, domainName string, record DNSRecord) error { +func (c *Client) UpdateDNSRecord(sessionID, domainName string, records []DNSRecord) error { payload := &Request{ Action: "updateDnsRecords", - Param: UpdateDNSRecordsMsg{ + Param: UpdateDNSRecordsRequest{ DomainName: domainName, CustomerNumber: c.customerNumber, APIKey: c.apiKey, APISessionID: sessionID, ClientRequestID: "", - DNSRecordSet: DNSRecordSet{DNSRecords: []DNSRecord{record}}, + DNSRecordSet: DNSRecordSet{DNSRecords: records}, }, } - response, err := c.sendRequest(payload) + err := c.doRequest(payload, nil) if err != nil { - return err + return fmt.Errorf("error when sending the request: %v", err) } - var r ResponseMsg - - err = json.Unmarshal(response, &r) - if err != nil { - return err - } - - if r.Status != success { - return fmt.Errorf("%s: %+v", r.ShortMessage, r) - } return nil } @@ -239,7 +212,7 @@ func (c *Client) UpdateDNSRecord(sessionID, domainName string, record DNSRecord) func (c *Client) GetDNSRecords(hostname, apiSessionID string) ([]DNSRecord, 
error) { payload := &Request{ Action: "infoDnsRecords", - Param: InfoDNSRecordsMsg{ + Param: InfoDNSRecordsRequest{ DomainName: hostname, CustomerNumber: c.customerNumber, APIKey: c.apiKey, @@ -248,82 +221,98 @@ func (c *Client) GetDNSRecords(hostname, apiSessionID string) ([]DNSRecord, erro }, } - response, err := c.sendRequest(payload) + var responseData InfoDNSRecordsResponse + err := c.doRequest(payload, &responseData) if err != nil { - return nil, err + return nil, fmt.Errorf("error when sending the request: %v", err) } - var r ResponseMsg - - err = json.Unmarshal(response, &r) - if err != nil { - return nil, err - } - - if r.Status != success { - return nil, fmt.Errorf("%s", r.ShortMessage) - } - return r.ResponseData.DNSRecords, nil + return responseData.DNSRecords, nil } -// sendRequest marshals given body to JSON, send the request to netcup API +// doRequest marshals given body to JSON, send the request to netcup API // and returns body of response -func (c *Client) sendRequest(payload interface{}) ([]byte, error) { +func (c *Client) doRequest(payload interface{}, responseData interface{}) error { body, err := json.Marshal(payload) if err != nil { - return nil, err + return err } req, err := http.NewRequest(http.MethodPost, c.BaseURL, bytes.NewReader(body)) if err != nil { - return nil, err + return err } - req.Close = true + req.Close = true req.Header.Set("content-type", "application/json") req.Header.Set("User-Agent", acme.UserAgent) resp, err := c.HTTPClient.Do(req) if err != nil { - return nil, err + return err } - if resp.StatusCode > 299 { - return nil, fmt.Errorf("API request failed with HTTP Status code %d", resp.StatusCode) + if err = checkResponse(resp); err != nil { + return err } - body, err = ioutil.ReadAll(resp.Body) + respMsg, err := decodeResponseMsg(resp) if err != nil { - return nil, fmt.Errorf("read of response body failed, %v", err) + return err } - defer resp.Body.Close() - return body, nil -} + if respMsg.Status != success { + return 
respMsg + } -// GetDNSRecordIdx searches a given array of DNSRecords for a given DNSRecord -// equivalence is determined by Destination and RecortType attributes -// returns index of given DNSRecord in given array of DNSRecords -func GetDNSRecordIdx(records []DNSRecord, record DNSRecord) (int, error) { - for index, element := range records { - if record.Destination == element.Destination && record.RecordType == element.RecordType { - return index, nil + if responseData != nil { + err = json.Unmarshal(respMsg.ResponseData, responseData) + if err != nil { + return fmt.Errorf("%v: unmarshaling %T error: %v: %s", + respMsg, responseData, err, string(respMsg.ResponseData)) } } - return -1, fmt.Errorf("no DNS Record found") + + return nil } -// CreateTxtRecord uses the supplied values to return a DNSRecord of type TXT for the dns-01 challenge -func CreateTxtRecord(hostname, value string, ttl int) DNSRecord { - return DNSRecord{ - ID: 0, - Hostname: hostname, - RecordType: "TXT", - Priority: "", - Destination: value, - DeleteRecord: false, - State: "", - TTL: ttl, +func checkResponse(resp *http.Response) error { + if resp.StatusCode > 299 { + if resp.Body == nil { + return fmt.Errorf("response body is nil, status code=%d", resp.StatusCode) + } + + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("unable to read body: status code=%d, error=%v", resp.StatusCode, err) + } + + return fmt.Errorf("status code=%d: %s", resp.StatusCode, string(raw)) } + + return nil +} + +func decodeResponseMsg(resp *http.Response) (*ResponseMsg, error) { + if resp.Body == nil { + return nil, fmt.Errorf("response body is nil, status code=%d", resp.StatusCode) + } + + defer resp.Body.Close() + + raw, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("unable to read body: status code=%d, error=%v", resp.StatusCode, err) + } + + var respMsg ResponseMsg + err = json.Unmarshal(raw, &respMsg) + if err != nil { + return 
nil, fmt.Errorf("unmarshaling %T error [status code=%d]: %v: %s", respMsg, resp.StatusCode, err, string(raw)) + } + + return &respMsg, nil } diff --git a/vendor/github.com/xenolf/lego/providers/dns/netcup/netcup.go b/vendor/github.com/xenolf/lego/providers/dns/netcup/netcup.go index 983b71e57..0dc59f960 100644 --- a/vendor/github.com/xenolf/lego/providers/dns/netcup/netcup.go +++ b/vendor/github.com/xenolf/lego/providers/dns/netcup/netcup.go @@ -9,6 +9,7 @@ import ( "time" "github.com/xenolf/lego/acme" + "github.com/xenolf/lego/log" "github.com/xenolf/lego/platform/config/env" ) @@ -27,8 +28,8 @@ type Config struct { func NewDefaultConfig() *Config { return &Config{ TTL: env.GetOrDefaultInt("NETCUP_TTL", 120), - PropagationTimeout: env.GetOrDefaultSecond("NETCUP_PROPAGATION_TIMEOUT", acme.DefaultPropagationTimeout), - PollingInterval: env.GetOrDefaultSecond("NETCUP_POLLING_INTERVAL", acme.DefaultPollingInterval), + PropagationTimeout: env.GetOrDefaultSecond("NETCUP_PROPAGATION_TIMEOUT", 120*time.Second), + PollingInterval: env.GetOrDefaultSecond("NETCUP_POLLING_INTERVAL", 5*time.Second), HTTPClient: &http.Client{ Timeout: env.GetOrDefaultSecond("NETCUP_HTTP_TIMEOUT", 10*time.Second), }, @@ -76,11 +77,11 @@ func NewDNSProviderConfig(config *Config) (*DNSProvider, error) { return nil, errors.New("netcup: the configuration of the DNS provider is nil") } - if config.Customer == "" || config.Key == "" || config.Password == "" { - return nil, fmt.Errorf("netcup: netcup credentials missing") + client, err := NewClient(config.Customer, config.Key, config.Password) + if err != nil { + return nil, fmt.Errorf("netcup: %v", err) } - client := NewClient(config.Customer, config.Key, config.Password) client.HTTPClient = config.HTTPClient return &DNSProvider{client: client, config: config}, nil @@ -100,27 +101,37 @@ func (d *DNSProvider) Present(domainName, token, keyAuth string) error { return fmt.Errorf("netcup: %v", err) } - hostname := strings.Replace(fqdn, "."+zone, "", 1) - 
record := CreateTxtRecord(hostname, value, d.config.TTL) - - err = d.client.UpdateDNSRecord(sessionID, acme.UnFqdn(zone), record) - if err != nil { - if errLogout := d.client.Logout(sessionID); errLogout != nil { - return fmt.Errorf("netcup: failed to add TXT-Record: %v; %v", err, errLogout) + defer func() { + err = d.client.Logout(sessionID) + if err != nil { + log.Print("netcup: %v", err) } + }() + + hostname := strings.Replace(fqdn, "."+zone, "", 1) + record := createTxtRecord(hostname, value, d.config.TTL) + + zone = acme.UnFqdn(zone) + + records, err := d.client.GetDNSRecords(zone, sessionID) + if err != nil { + // skip no existing records + log.Infof("no existing records, error ignored: %v", err) + } + + records = append(records, record) + + err = d.client.UpdateDNSRecord(sessionID, zone, records) + if err != nil { return fmt.Errorf("netcup: failed to add TXT-Record: %v", err) } - err = d.client.Logout(sessionID) - if err != nil { - return fmt.Errorf("netcup: %v", err) - } return nil } // CleanUp removes the TXT record matching the specified parameters -func (d *DNSProvider) CleanUp(domainname, token, keyAuth string) error { - fqdn, value, _ := acme.DNS01Record(domainname, keyAuth) +func (d *DNSProvider) CleanUp(domainName, token, keyAuth string) error { + fqdn, value, _ := acme.DNS01Record(domainName, keyAuth) zone, err := acme.FindZoneByFqdn(fqdn, acme.RecursiveNameservers) if err != nil { @@ -132,6 +143,13 @@ func (d *DNSProvider) CleanUp(domainname, token, keyAuth string) error { return fmt.Errorf("netcup: %v", err) } + defer func() { + err = d.client.Logout(sessionID) + if err != nil { + log.Print("netcup: %v", err) + } + }() + hostname := strings.Replace(fqdn, "."+zone, "", 1) zone = acme.UnFqdn(zone) @@ -141,27 +159,20 @@ func (d *DNSProvider) CleanUp(domainname, token, keyAuth string) error { return fmt.Errorf("netcup: %v", err) } - record := CreateTxtRecord(hostname, value, 0) + record := createTxtRecord(hostname, value, 0) - idx, err := 
GetDNSRecordIdx(records, record) + idx, err := getDNSRecordIdx(records, record) if err != nil { return fmt.Errorf("netcup: %v", err) } records[idx].DeleteRecord = true - err = d.client.UpdateDNSRecord(sessionID, zone, records[idx]) + err = d.client.UpdateDNSRecord(sessionID, zone, []DNSRecord{records[idx]}) if err != nil { - if errLogout := d.client.Logout(sessionID); errLogout != nil { - return fmt.Errorf("netcup: %v; %v", err, errLogout) - } return fmt.Errorf("netcup: %v", err) } - err = d.client.Logout(sessionID) - if err != nil { - return fmt.Errorf("netcup: %v", err) - } return nil } @@ -170,3 +181,29 @@ func (d *DNSProvider) CleanUp(domainname, token, keyAuth string) error { func (d *DNSProvider) Timeout() (timeout, interval time.Duration) { return d.config.PropagationTimeout, d.config.PollingInterval } + +// getDNSRecordIdx searches a given array of DNSRecords for a given DNSRecord +// equivalence is determined by Destination and RecortType attributes +// returns index of given DNSRecord in given array of DNSRecords +func getDNSRecordIdx(records []DNSRecord, record DNSRecord) (int, error) { + for index, element := range records { + if record.Destination == element.Destination && record.RecordType == element.RecordType { + return index, nil + } + } + return -1, fmt.Errorf("no DNS Record found") +} + +// createTxtRecord uses the supplied values to return a DNSRecord of type TXT for the dns-01 challenge +func createTxtRecord(hostname, value string, ttl int) DNSRecord { + return DNSRecord{ + ID: 0, + Hostname: hostname, + RecordType: "TXT", + Priority: "", + Destination: value, + DeleteRecord: false, + State: "", + TTL: ttl, + } +} diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 000000000..233b8b62c --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "errors" + "unicode/utf16" +) + +// bmpString returns s encoded in UCS-2 with a zero terminator. +func bmpString(s string) ([]byte, error) { + // References: + // https://tools.ietf.org/html/rfc7292#appendix-B.1 + // https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane + // - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes + // EncodeRune returns 0xfffd if the rune does not need special encoding + // - the above RFC provides the info that BMPStrings are NULL terminated. + + ret := make([]byte, 0, 2*len(s)+2) + + for _, r := range s { + if t, _ := utf16.EncodeRune(r); t != 0xfffd { + return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2") + } + ret = append(ret, byte(r/256), byte(r%256)) + } + + return append(ret, 0, 0), nil +} + +func decodeBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("pkcs12: odd-length BMP string") + } + + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + + return string(utf16.Decode(s)), nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go new file mode 100644 index 000000000..484ca51b7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -0,0 +1,131 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "bytes" + "crypto/cipher" + "crypto/des" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + "golang.org/x/crypto/pkcs12/internal/rc2" +) + +var ( + oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3}) + oidPBEWithSHAAnd40BitRC2CBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6}) +) + +// pbeCipher is an abstraction of a PKCS#12 cipher. +type pbeCipher interface { + // create returns a cipher.Block given a key. + create(key []byte) (cipher.Block, error) + // deriveKey returns a key derived from the given password and salt. + deriveKey(salt, password []byte, iterations int) []byte + // deriveKey returns an IV derived from the given password and salt. + deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case 
algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. 
+type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 000000000..7377ce6fb --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 000000000..7499e3fb6 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,271 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. 
+*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 
0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = 
rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + 
binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 000000000..5f38aa7de --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 000000000..5c419d41e --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. + + // We assume here that u and v are both multiples of 8, as are the + // lengths of the password and salt strings (which we denote by p and s, + // respectively) and the number n of pseudorandom bits required. In + // addition, u and v are of course non-zero. + + // For information on security considerations for MD5 [19], see [25] and + // [1], and on those for MD2, see [18]. + + // The following procedure can be used to produce pseudorandom bits for + // a particular "purpose" that is identified by a byte called "ID". + // This standard specifies 3 different values for the ID byte: + + // 1. 
If ID=1, then the pseudorandom bits being produced are to be used + // as key material for performing encryption or decryption. + + // 2. If ID=2, then the pseudorandom bits being produced are to be used + // as an IV (Initial Value) for encryption or decryption. + + // 3. If ID=3, then the pseudorandom bits being produced are to be used + // as an integrity key for MACing. + + // 1. Construct a string, D (the "diversifier"), by concatenating v/8 + // copies of ID. + var D []byte + for i := 0; i < v; i++ { + D = append(D, ID) + } + + // 2. Concatenate copies of the salt together to create a string S of + // length v(ceiling(s/v)) bits (the final copy of the salt may be + // truncated to create S). Note that if the salt is the empty + // string, then so is S. + + S := fillWithRepeats(salt, v) + + // 3. Concatenate copies of the password together to create a string P + // of length v(ceiling(p/v)) bits (the final copy of the password + // may be truncated to create P). Note that if the password is the + // empty string, then so is P. + + P := fillWithRepeats(password, v) + + // 4. Set I=S||P to be the concatenation of S and P. + I := append(S, P...) + + // 5. Set c=ceiling(n/u). + c := (size + u - 1) / u + + // 6. For i=1, 2, ..., c, do the following: + A := make([]byte, c*20) + var IjBuf []byte + for i := 0; i < c; i++ { + // A. Set A2=H^r(D||I). (i.e., the r-th hash of D||1, + // H(H(H(... H(D||I)))) + Ai := hash(append(D, I...)) + for j := 1; j < r; j++ { + Ai = hash(Ai) + } + copy(A[i*20:], Ai[:]) + + if i < c-1 { // skip on last iteration + // B. Concatenate copies of Ai to create a string B of length v + // bits (the final copy of Ai may be truncated to create B). + var B []byte + for len(B) < v { + B = append(B, Ai[:]...) + } + B = B[:v] + + // C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit + // blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by + // setting I_j=(I_j+B+1) mod 2^v for each j. 
+ { + Bbi := new(big.Int).SetBytes(B) + Ij := new(big.Int) + + for j := 0; j < len(I)/v; j++ { + Ij.SetBytes(I[j*v : (j+1)*v]) + Ij.Add(Ij, Bbi) + Ij.Add(Ij, one) + Ijb := Ij.Bytes() + // We expect Ijb to be exactly v bytes, + // if it is longer or shorter we must + // adjust it accordingly. + if len(Ijb) > v { + Ijb = Ijb[len(Ijb)-v:] + } + if len(Ijb) < v { + if IjBuf == nil { + IjBuf = make([]byte, v) + } + bytesShort := v - len(Ijb) + for i := 0; i < bytesShort; i++ { + IjBuf[i] = 0 + } + copy(IjBuf[bytesShort:], Ijb) + Ijb = IjBuf + } + copy(I[j*v:(j+1)*v], Ijb) + } + } + } + } + // 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom + // bit string, A. + + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 000000000..eff9ad3a9 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,346 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. 
+package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there 
is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ConvertToPEM converts all "safe bags" contained in pfxData to PEM blocks. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func 
convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData. 
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 
'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) 
+ } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 000000000..def1f7b98 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if 
!bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} From 638960284e4b103a8185b84df4ffacd21cc6b1ca Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 23 Oct 2018 13:14:03 +0200 Subject: [PATCH 09/29] Typo in the UI. --- webui/src/app/components/providers/providers.component.html | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui/src/app/components/providers/providers.component.html b/webui/src/app/components/providers/providers.component.html index 72fa96973..118b07f7d 100644 --- a/webui/src/app/components/providers/providers.component.html +++ b/webui/src/app/components/providers/providers.component.html @@ -376,7 +376,7 @@ {{ p.headers.sslHost }} - SSL Host + SSL Force Host {{ p.headers.sslForceHost }} From c7df82e695b3e994e51eba0af58a96dc095aa9c6 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 23 Oct 2018 17:36:05 +0200 Subject: [PATCH 10/29] Remove the trailing dot if the domain is not defined. 
--- provider/consulcatalog/config.go | 2 +- provider/consulcatalog/config_test.go | 16 +++++++++++++++- provider/docker/config.go | 7 +++++-- provider/ecs/config.go | 6 +++++- provider/ecs/config_test.go | 12 ++++++------ provider/marathon/config.go | 7 +++++-- provider/mesos/config.go | 5 ++++- provider/rancher/config.go | 6 +++++- 8 files changed, 46 insertions(+), 15 deletions(-) diff --git a/provider/consulcatalog/config.go b/provider/consulcatalog/config.go index f148a73a0..22cc99651 100644 --- a/provider/consulcatalog/config.go +++ b/provider/consulcatalog/config.go @@ -111,7 +111,7 @@ func (p *Provider) getFrontendRule(service serviceUpdate) string { return "" } - return buffer.String() + return strings.TrimSuffix(buffer.String(), ".") } func (p *Provider) getServer(node *api.ServiceEntry) types.Server { diff --git a/provider/consulcatalog/config_test.go b/provider/consulcatalog/config_test.go index 9485b925c..80d1c5fa3 100644 --- a/provider/consulcatalog/config_test.go +++ b/provider/consulcatalog/config_test.go @@ -1029,6 +1029,7 @@ func TestProviderGetFrontendRule(t *testing.T) { testCases := []struct { desc string service serviceUpdate + domain string expected string }{ { @@ -1037,8 +1038,18 @@ func TestProviderGetFrontendRule(t *testing.T) { ServiceName: "foo", Attributes: []string{}, }, + domain: "localhost", expected: "Host:foo.localhost", }, + { + desc: "When no domain should return default host foo", + service: serviceUpdate{ + ServiceName: "foo", + Attributes: []string{}, + }, + domain: "", + expected: "Host:foo", + }, { desc: "Should return host *.example.com", service: serviceUpdate{ @@ -1047,6 +1058,7 @@ func TestProviderGetFrontendRule(t *testing.T) { "traefik.frontend.rule=Host:*.example.com", }, }, + domain: "localhost", expected: "Host:*.example.com", }, { @@ -1057,6 +1069,7 @@ func TestProviderGetFrontendRule(t *testing.T) { "traefik.frontend.rule=Host:{{.ServiceName}}.example.com", }, }, + domain: "localhost", expected: 
"Host:foo.example.com", }, { @@ -1068,6 +1081,7 @@ func TestProviderGetFrontendRule(t *testing.T) { "contextPath=/bar", }, }, + domain: "localhost", expected: "PathPrefix:/bar", }, } @@ -1078,7 +1092,7 @@ func TestProviderGetFrontendRule(t *testing.T) { t.Parallel() p := &Provider{ - Domain: "localhost", + Domain: test.domain, Prefix: "traefik", FrontEndRule: "Host:{{.ServiceName}}.{{.Domain}}", frontEndRuleTemplate: template.New("consul catalog frontend rule"), diff --git a/provider/docker/config.go b/provider/docker/config.go index ab996b384..576a848c1 100644 --- a/provider/docker/config.go +++ b/provider/docker/config.go @@ -186,13 +186,16 @@ func (p *Provider) getFrontendRule(container dockerData, segmentLabels map[strin } domain := label.GetStringValue(segmentLabels, label.TraefikDomain, p.Domain) + if len(domain) > 0 { + domain = "." + domain + } if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil { - return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + "." + domain + return "Host:" + getSubDomain(values[labelDockerComposeService]+"."+values[labelDockerComposeProject]) + domain } if len(domain) > 0 { - return "Host:" + getSubDomain(container.ServiceName) + "." + domain + return "Host:" + getSubDomain(container.ServiceName) + domain } return "" diff --git a/provider/ecs/config.go b/provider/ecs/config.go index fcc0ab1ee..ccc25e8bf 100644 --- a/provider/ecs/config.go +++ b/provider/ecs/config.go @@ -141,7 +141,11 @@ func (p *Provider) getFrontendRule(i ecsInstance) string { } domain := label.GetStringValue(i.SegmentLabels, label.TraefikDomain, p.Domain) - defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + "." + domain + if len(domain) > 0 { + domain = "." 
+ domain + } + + defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + domain return label.GetStringValue(i.TraefikLabels, label.TraefikFrontendRule, defaultRule) } diff --git a/provider/ecs/config_test.go b/provider/ecs/config_test.go index f120b109f..0a4a169c8 100644 --- a/provider/ecs/config_test.go +++ b/provider/ecs/config_test.go @@ -52,7 +52,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, PassHostHeader: true, @@ -99,7 +99,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, PassHostHeader: true, @@ -144,7 +144,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, Auth: &types.Auth{ @@ -195,7 +195,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, Auth: &types.Auth{ @@ -246,7 +246,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, Auth: &types.Auth{ @@ -305,7 +305,7 @@ func TestBuildConfiguration(t *testing.T) { Backend: "backend-instance", Routes: map[string]types.Route{ "route-frontend-instance": { - Rule: "Host:instance.", + Rule: "Host:instance", }, }, Auth: &types.Auth{ diff --git a/provider/marathon/config.go b/provider/marathon/config.go index 0194ba4c6..5e892cb4a 100644 --- a/provider/marathon/config.go +++ b/provider/marathon/config.go @@ -214,11 +214,14 @@ func (p *Provider) getFrontendRule(app appData) string { 
} domain := label.GetStringValue(app.SegmentLabels, label.TraefikDomain, p.Domain) + if len(domain) > 0 { + domain = "." + domain + } if len(app.SegmentName) > 0 { - return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + "." + domain + return "Host:" + strings.ToLower(provider.Normalize(app.SegmentName)) + "." + p.getSubDomain(app.ID) + domain } - return "Host:" + p.getSubDomain(app.ID) + "." + domain + return "Host:" + p.getSubDomain(app.ID) + domain } func getPort(task marathon.Task, app appData) string { diff --git a/provider/mesos/config.go b/provider/mesos/config.go index 47a5ef9ce..61f415e88 100644 --- a/provider/mesos/config.go +++ b/provider/mesos/config.go @@ -222,8 +222,11 @@ func (p *Provider) getFrontendRule(task taskData) string { } domain := label.GetStringValue(task.TraefikLabels, label.TraefikDomain, p.Domain) + if len(domain) > 0 { + domain = "." + domain + } - return "Host:" + p.getSegmentSubDomain(task) + "." + domain + return "Host:" + p.getSegmentSubDomain(task) + domain } func (p *Provider) getServers(tasks []taskData) map[string]types.Server { diff --git a/provider/rancher/config.go b/provider/rancher/config.go index ccd8a43d1..d6232b47a 100644 --- a/provider/rancher/config.go +++ b/provider/rancher/config.go @@ -128,7 +128,11 @@ func (p *Provider) serviceFilter(service rancherData) bool { func (p *Provider) getFrontendRule(serviceName string, labels map[string]string) string { domain := label.GetStringValue(labels, label.TraefikDomain, p.Domain) - defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + "." + domain + if len(domain) > 0 { + domain = "." 
+ domain + } + + defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + domain return label.GetStringValue(labels, label.TraefikFrontendRule, defaultRule) } From 82b2a102ed36f3ba1a50fd54bb988cc9e9291946 Mon Sep 17 00:00:00 2001 From: Daniel Tomcej Date: Tue, 23 Oct 2018 11:04:05 -0500 Subject: [PATCH 11/29] Add double wildcard test --- .../https/wildcard.www.snitest.com.cert | 20 +++++++++++++ .../https/wildcard.www.snitest.com.key | 28 +++++++++++++++++++ tls/certificate_store_test.go | 7 +++++ 3 files changed, 55 insertions(+) create mode 100644 integration/fixtures/https/wildcard.www.snitest.com.cert create mode 100644 integration/fixtures/https/wildcard.www.snitest.com.key diff --git a/integration/fixtures/https/wildcard.www.snitest.com.cert b/integration/fixtures/https/wildcard.www.snitest.com.cert new file mode 100644 index 000000000..9f0575f3a --- /dev/null +++ b/integration/fixtures/https/wildcard.www.snitest.com.cert @@ -0,0 +1,20 @@ +-----BEGIN CERTIFICATE----- +MIIDMDCCAhgCCQC425NNs+WWZzANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJG +UjELMAkGA1UECAwCTFkxDTALBgNVBAcMBEx5b24xEzARBgNVBAoMCkNvbnRhaW5v +dXMxGjAYBgNVBAMMESoud3d3LnNuaXRlc3QuY29tMB4XDTE4MTAyMjE0MjcxNFoX +DTI4MTAxOTE0MjcxNFowWjELMAkGA1UEBhMCRlIxCzAJBgNVBAgMAkxZMQ0wCwYD +VQQHDARMeW9uMRMwEQYDVQQKDApDb250YWlub3VzMRowGAYDVQQDDBEqLnd3dy5z +bml0ZXN0LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwFbc/I +gpOVNoefnIQrAy2wqK3VKSjFT5z5E8MVrHSU9PpC8bGQb0hTULmfHSzRTsajRLjv +rLM/EZDrJL+PQHcCG+XVYbqMmVis4qsevuOyFdFdfe66LIsV+zmsSUbMyssGS2Qw +AZx2D8RDtY35VcSA845gjQH+KfF1ST4s/73sr8ID5ZEEn4J6fbmrVfbxhygsx036 +VNw8OKby+7Gx3irz1ZC6JZ6jmzqlsu4EuDY1cjHCZSUD/JQ1jHz3gIRLV9OiglN/ +PAPu8zZZ/vtalEGytpLUcbjmvNg24Yc94vd3W3r4Ne13FhDLnB3w8Gz4pYZsEgkk +18LzttWcqHnNwg8CAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAW1XJBk7oCGkzF4nR +0l2cEpG2QkHAUuXRa4PqH9QALUj2taAZHGiFF0UsknjbCnTsX6rzSLy1NFiJxyuO +CmaiZ9Y9mcYw+T+SXo862Yu1Jch48LoD5x1vW/F8ZT+Fnl+gXoh7ssAtjQ4YViWy 
+Z3A1y54Mb6JhuVjfOBuzbGwI9DDAetKZgTVY7SCm7MTrF5z/YMly5rixV5th1XCj +4bqZ9p4CZyP++Y4RffKuCf35cyD/9Y7Boq5A3E8LoxMRFzszyn9RhKdkKLOevGgc +r4H/w92uaQqQGRTxQfNWfphBdNuc+ZgXYIGiexcpqxJfA0Ei7XSsKVxxXNxLoJe5 +3xs+Lg== +-----END CERTIFICATE----- diff --git a/integration/fixtures/https/wildcard.www.snitest.com.key b/integration/fixtures/https/wildcard.www.snitest.com.key new file mode 100644 index 000000000..9ad9fe643 --- /dev/null +++ b/integration/fixtures/https/wildcard.www.snitest.com.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDMBW3PyIKTlTaH +n5yEKwMtsKit1SkoxU+c+RPDFax0lPT6QvGxkG9IU1C5nx0s0U7Go0S476yzPxGQ +6yS/j0B3Ahvl1WG6jJlYrOKrHr7jshXRXX3uuiyLFfs5rElGzMrLBktkMAGcdg/E +Q7WN+VXEgPOOYI0B/inxdUk+LP+97K/CA+WRBJ+Cen25q1X28YcoLMdN+lTcPDim +8vuxsd4q89WQuiWeo5s6pbLuBLg2NXIxwmUlA/yUNYx894CES1fTooJTfzwD7vM2 +Wf77WpRBsraS1HG45rzYNuGHPeL3d1t6+DXtdxYQy5wd8PBs+KWGbBIJJNfC87bV +nKh5zcIPAgMBAAECggEAW6lEwMmRAMVVDnHDXA4HC4wG/LJ8H3kmX5v4KPmf1XDm +71kMRX5iwNfNuNenv+75uXy4722e5Zk8RyOeCwJNMCqeZhAMLEfmzVQ/MipKEPp9 +muaqIYs7X/GsQSkKcuinY7ecP5Lh5m2Uf9T7yKFwyyw0QI9YSsDqDzVmhqyo6aaT +ob4Bua9mTOTMCjEaIk06SkS0Z5sCqtvKMMx/fI2XYSmxQvbwYPHInpyu2LQAvKTw +wpwDLF4Zetw1Tutbk8TSTaoC2rn6ZH5DYdJ9pk55/+UqVPo8tu/M//8JN0t9GY1/ +aqJ25juHjj0pfp+0830NOs4n6symBcR4bSbDn7r/4QKBgQDnTOdo09jtzJimGlbH +zEqYOi0NrWU/mLkpqbczjKqx8BnTyfF3FudhY7Gp2v1WX/ofjYS/P/2nY6sXKvig +9htqLRCe0Tk9vavY3eSEyaHu9Tbeixx7lM4pQfHCASreMp37RyhIisSPkzdCChNb +OuqYpTW4C2u9schMlmCVaWYtTQKBgQDhzsoIlWAAD//h2xqCGpcar0SzgPCHdUH6 +4ejVhmWPfy5Jlk1CwStlsO4BlcTW7ahN81GqIlyiqpi3O2JZ4HfdoZgKNdMK6YD5 +TkmXnABa42RrQtYHltvJCthctmjP7qoRxvDrDKLBY481AZjC1MNgPlpSrfALMibx +wyd6rjQuywKBgH+nuAfo8866nnz+CGsY2wqNARSNYFXrKjZOTqgKuKKgCwEScUvy +vhzH8uP10t/69Ia5ikwrOwlJPsH4m2PqsFK3MHcWrerfZZq5TEflKJRDjdbhHAUw +qV+n34/dKRWdBggKy7bNr5I2A8dU3D37lEJO3AkJdJsrJYrva7rKgvP5AoGAXNer +VfAk8qGhcfmmYowQSNZ7htqjCu75W+/6zaBerat7GqKDzcii0UL3+QrdTgmVQ8eh +cjSCphdCh0QRYiba4fOJEdmjlj7/2oGH3KA1vSj1puxqF+C9KWIeJ7CQU74rivej 
+IuGlIaKPxRmM976HPlEkzg3aPqA2Rv0YhGaP6hUCgYA3hEG6daHOj6/P+rR28wTp +xyraym7/8BOVWLweUFVM7YKOKrLAa7lhd254Twy0wUvTgiIw/XamhiVmdSh80gI9 +hooqYern7WGoL9zU2spVaEe2AzhSRvTuLqlRRyLLnPC6uaGVeC+SYD7zIDB2cwyC +bbvXmg15uPp02YpLtm8wyw== +-----END PRIVATE KEY----- diff --git a/tls/certificate_store_test.go b/tls/certificate_store_test.go index 9915133fa..34a13ec08 100644 --- a/tls/certificate_store_test.go +++ b/tls/certificate_store_test.go @@ -77,6 +77,13 @@ func TestGetBestCertificate(t *testing.T) { dynamicCert: "*.snitest.com", expectedCert: "*.snitest.com", }, + { + desc: "Best Match with two wildcard certs", + domainToCheck: "foo.www.snitest.com", + staticCert: "*.www.snitest.com", + dynamicCert: "*.snitest.com", + expectedCert: "*.www.snitest.com", + }, } for _, test := range testCases { From 99ddd7f9cb5cc0a688268a6381d3aa5e2eb340a0 Mon Sep 17 00:00:00 2001 From: herver Date: Tue, 23 Oct 2018 18:12:03 +0200 Subject: [PATCH 12/29] domain is also optional for "normal" mode --- docs/configuration/backends/docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/configuration/backends/docker.md b/docs/configuration/backends/docker.md index 2ab4aae71..3515660b7 100644 --- a/docs/configuration/backends/docker.md +++ b/docs/configuration/backends/docker.md @@ -22,7 +22,7 @@ endpoint = "unix:///var/run/docker.sock" # Default base domain used for the frontend rules. # Can be overridden by setting the "traefik.domain" label on a container. # -# Required +# Optional # domain = "docker.localhost" @@ -450,4 +450,4 @@ Below is a recap of the behavior of `usebindportip` in different situations. | LblPort | ExtIp1:ExtPort1:IntPort1 & ExtIp2:LblPort:IntPort2 | ExtIp2:LblPort | !!! note - In the above table, ExtIp stands for "external IP found in the binding", IntIp stands for "internal network container's IP", ExtPort stands for "external Port found in the binding", and IntPort stands for "internal network container's port." 
\ No newline at end of file + In the above table, ExtIp stands for "external IP found in the binding", IntIp stands for "internal network container's IP", ExtPort stands for "external Port found in the binding", and IntPort stands for "internal network container's port." From df55c24cb5bbf4f050eb5bdef9cdb35913dae29b Mon Sep 17 00:00:00 2001 From: Daniel Tomcej Date: Wed, 24 Oct 2018 04:00:05 -0500 Subject: [PATCH 13/29] Add missing tmp directory to scratch image --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index dbbcdd52c..873d55312 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,4 +2,5 @@ FROM scratch COPY script/ca-certificates.crt /etc/ssl/certs/ COPY dist/traefik / EXPOSE 80 +VOLUME ["/tmp"] ENTRYPOINT ["/traefik"] From 22ee8700caf24604e7ba974306c0280903b030f8 Mon Sep 17 00:00:00 2001 From: Manuel Zapf Date: Thu, 25 Oct 2018 09:50:03 +0200 Subject: [PATCH 14/29] add default path if nothing present --- provider/kubernetes/kubernetes.go | 15 ++++++++++++- provider/kubernetes/kubernetes_test.go | 31 ++++++++++++++++++++++++++ 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/provider/kubernetes/kubernetes.go b/provider/kubernetes/kubernetes.go index 01c345e57..94f8d825d 100644 --- a/provider/kubernetes/kubernetes.go +++ b/provider/kubernetes/kubernetes.go @@ -43,6 +43,7 @@ const ( traefikDefaultIngressClass = "traefik" defaultBackendName = "global-default-backend" defaultFrontendName = "global-default-frontend" + defaultFrontendRule = "PathPrefix:/" allowedProtocolHTTPS = "https" allowedProtocolH2C = "h2c" ) @@ -238,6 +239,11 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error) } baseName := r.Host + pa.Path + + if len(baseName) == 0 { + baseName = pa.Backend.ServiceName + } + if priority > 0 { baseName = strconv.Itoa(priority) + "-" + baseName } @@ -319,6 +325,12 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error) } } + if len(frontend.Routes) == 
0 { + frontend.Routes["/"] = types.Route{ + Rule: defaultFrontendRule, + } + } + templateObjects.Frontends[baseName] = frontend templateObjects.Backends[baseName].CircuitBreaker = getCircuitBreaker(service) templateObjects.Backends[baseName].LoadBalancer = getLoadBalancer(service) @@ -539,7 +551,7 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem } templateObjects.Frontends[defaultFrontendName].Routes["/"] = types.Route{ - Rule: "PathPrefix:/", + Rule: defaultFrontendRule, } return nil @@ -578,6 +590,7 @@ func getRuleForPath(pa extensionsv1beta1.HTTPIngressPath, i *extensionsv1beta1.I rules = append(rules, rule) } + return strings.Join(rules, ";"), nil } diff --git a/provider/kubernetes/kubernetes_test.go b/provider/kubernetes/kubernetes_test.go index b11dd867c..4c04ce750 100644 --- a/provider/kubernetes/kubernetes_test.go +++ b/provider/kubernetes/kubernetes_test.go @@ -50,6 +50,11 @@ func TestLoadIngresses(t *testing.T) { onePath(iBackend("service7", intstr.FromInt(80))), ), ), + iRule(iHost(""), + iPaths( + onePath(iBackend("service8", intstr.FromInt(80))), + ), + ), ), ), } @@ -118,6 +123,14 @@ func TestLoadIngresses(t *testing.T) { clusterIP("10.0.0.7"), sPorts(sPort(80, ""))), ), + buildService( + sName("service8"), + sNamespace("testing"), + sUID("8"), + sSpec( + clusterIP("10.0.0.8"), + sPorts(sPort(80, ""))), + ), } endpoints := []*corev1.Endpoints{ @@ -165,6 +178,14 @@ func TestLoadIngresses(t *testing.T) { eAddresses(eAddress("10.10.0.7")), ePorts(ePort(80, ""))), ), + buildEndpoint( + eNamespace("testing"), + eName("service8"), + eUID("8"), + subset( + eAddresses(eAddress("10.10.0.8")), + ePorts(ePort(80, ""))), + ), } watchChan := make(chan interface{}) @@ -218,6 +239,12 @@ func TestLoadIngresses(t *testing.T) { server("http://10.10.0.7:80", weight(1)), ), ), + backend("service8", + lbMethod("wrr"), + servers( + server("http://10.10.0.8:80", weight(1)), + ), + ), ), frontends( frontend("foo/bar", @@ -248,6 +275,10 @@ 
func TestLoadIngresses(t *testing.T) { passHostHeader(), routes(route("*.service7", "HostRegexp:{subdomain:[A-Za-z0-9-_]+}.service7")), ), + frontend("service8", + passHostHeader(), + routes(route("/", "PathPrefix:/")), + ), ), ) assert.Equal(t, expected, actual) From aa26927d6170d14f8404bb6405f5c269eee3dfc6 Mon Sep 17 00:00:00 2001 From: Benjamin Gandon Date: Thu, 25 Oct 2018 10:18:03 +0200 Subject: [PATCH 15/29] Case insensitive host rule --- Gopkg.lock | 3 +- rules/rules.go | 6 ++-- rules/rules_test.go | 35 +++++++++++++++++++++- vendor/github.com/containous/mux/regexp.go | 6 ++++ 4 files changed, 45 insertions(+), 5 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index e8b4586ec..6400ac3a2 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -291,7 +291,7 @@ branch = "master" name = "github.com/containous/mux" packages = ["."] - revision = "06ccd3e75091eb659b1d720cda0e16bc7057954c" + revision = "c33f32e268983f989290677351b871b65da75ba5" [[projects]] name = "github.com/containous/staert" @@ -819,6 +819,7 @@ revision = "9b66602d496a139e4722bdde32f0f1ac1c12d4a8" [[projects]] + branch = "master" name = "github.com/jjcollinge/servicefabric" packages = ["."] revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe" diff --git a/rules/rules.go b/rules/rules.go index 44332261b..b523a882f 100644 --- a/rules/rules.go +++ b/rules/rules.go @@ -53,10 +53,10 @@ func (r *Rules) host(hosts ...string) *mux.Route { }) } -func (r *Rules) hostRegexp(hosts ...string) *mux.Route { +func (r *Rules) hostRegexp(hostPatterns ...string) *mux.Route { router := r.Route.Route.Subrouter() - for _, host := range hosts { - router.Host(strings.ToLower(host)) + for _, hostPattern := range hostPatterns { + router.Host(hostPattern) } return r.Route.Route } diff --git a/rules/rules_test.go b/rules/rules_test.go index 7239ad750..a843e63ee 100644 --- a/rules/rules_test.go +++ b/rules/rules_test.go @@ -195,6 +195,39 @@ func TestHostRegexp(t *testing.T) { "http://barcom": false, }, }, + { + desc: "regex 
insensitive", + hostExp: "{dummy:[A-Za-z-]+\\.bar\\.com}", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "insensitive host", + hostExp: "{dummy:[a-z-]+\\.bar\\.com}", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, + { + desc: "insensitive host simple", + hostExp: "foo.bar.com", + urls: map[string]bool{ + "http://FOO.bar.com": true, + "http://foo.bar.com": true, + "http://fooubar.com": false, + "http://barucom": false, + "http://barcom": false, + }, + }, } for _, test := range testCases { @@ -212,7 +245,7 @@ func TestHostRegexp(t *testing.T) { for testURL, match := range test.urls { req := testhelpers.MustNewRequest(http.MethodGet, testURL, nil) - assert.Equal(t, match, rt.Match(req, &mux.RouteMatch{})) + assert.Equal(t, match, rt.Match(req, &mux.RouteMatch{}), testURL) } }) } diff --git a/vendor/github.com/containous/mux/regexp.go b/vendor/github.com/containous/mux/regexp.go index da8114ca4..d3049b7df 100644 --- a/vendor/github.com/containous/mux/regexp.go +++ b/vendor/github.com/containous/mux/regexp.go @@ -53,6 +53,12 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, varsN := make([]string, len(idxs)/2) varsR := make([]*regexp.Regexp, len(idxs)/2) pattern := bytes.NewBufferString("") + + // Host matching is case insensitive + if matchHost { + fmt.Fprint(pattern, "(?i)") + } + pattern.WriteByte('^') reverse := bytes.NewBufferString("") var end int From 8c2e99432df7fcebb6b8db96d3c479017a7ab297 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Thu, 25 Oct 2018 16:50:05 +0200 Subject: [PATCH 16/29] Add a note about TLS-ALPN challenge. 
--- docs/configuration/acme.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index 6cb617744..cd2d6bf78 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -182,6 +182,10 @@ entryPoint = "https" [acme.tlsChallenge] ``` +!!! note + If the `TLS-ALPN-01` challenge is used, `acme.entryPoint` has to be reachable by Let's Encrypt through port 443. + This is a Let's Encrypt limitation as described on the [community forum](https://community.letsencrypt.org/t/support-for-ports-other-than-80-and-443/3419/72). + #### `httpChallenge` Use the `HTTP-01` challenge to generate and renew ACME certificates by provisioning a HTTP resource under a well-known URI. From ac11323fdd976645a1a5ac48b85a92173f25aaf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=A9rald=20Cro=C3=ABs?= Date: Thu, 25 Oct 2018 17:04:03 +0200 Subject: [PATCH 17/29] Replaces emilevauge/whoami by containous/whoami in the documentation --- docs/benchmarks.md | 2 +- docs/index.md | 2 +- docs/user-guide/kv-config.md | 10 +++++----- docs/user-guide/swarm-mode.md | 14 +++++++------- docs/user-guide/swarm.md | 10 +++++----- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/benchmarks.md b/docs/benchmarks.md index 6e3bc42cf..4565d0449 100644 --- a/docs/benchmarks.md +++ b/docs/benchmarks.md @@ -15,7 +15,7 @@ I used 4 VMs for the tests with the following configuration: 1. One VM used to launch the benchmarking tool [wrk](https://github.com/wg/wrk) 2. One VM for Traefik (v1.0.0-beta.416) / nginx (v1.4.6) -3. Two VMs for 2 backend servers in go [whoami](https://github.com/emilevauge/whoamI/) +3. 
Two VMs for 2 backend servers in go [whoami](https://github.com/containous/whoami/) Each VM has been tuned using the following limits: diff --git a/docs/index.md b/docs/index.md index bcbcc4489..4f606be86 100644 --- a/docs/index.md +++ b/docs/index.md @@ -109,7 +109,7 @@ Edit your `docker-compose.yml` file and add the following at the end of your fil ```yaml # ... whoami: - image: emilevauge/whoami # A container that exposes an API to show its IP address + image: containous/whoami # A container that exposes an API to show its IP address labels: - "traefik.frontend.rule=Host:whoami.docker.localhost" ``` diff --git a/docs/user-guide/kv-config.md b/docs/user-guide/kv-config.md index 47dba912e..a39d1d9a6 100644 --- a/docs/user-guide/kv-config.md +++ b/docs/user-guide/kv-config.md @@ -24,7 +24,7 @@ The Traefik global configuration will be retrieved from a [Consul](https://consu First we have to launch Consul in a container. -The [docker-compose file](https://docs.docker.com/compose/compose-file/) allows us to launch Consul and four instances of the trivial app [emilevauge/whoamI](https://github.com/emilevauge/whoamI) : +The [docker-compose file](https://docs.docker.com/compose/compose-file/) allows us to launch Consul and four instances of the trivial app [containous/whoami](https://github.com/containous/whoami) : ```yaml consul: @@ -42,16 +42,16 @@ consul: - "8302/udp" whoami1: - image: emilevauge/whoami + image: containous/whoami whoami2: - image: emilevauge/whoami + image: containous/whoami whoami3: - image: emilevauge/whoami + image: containous/whoami whoami4: - image: emilevauge/whoami + image: containous/whoami ``` ### Upload the configuration in the Key-value store diff --git a/docs/user-guide/swarm-mode.md b/docs/user-guide/swarm-mode.md index 9b23ac60e..46b753abf 100644 --- a/docs/user-guide/swarm-mode.md +++ b/docs/user-guide/swarm-mode.md @@ -107,7 +107,7 @@ Let's explain this command: ## Deploy your apps -We can now deploy our app on the cluster, here 
[whoami](https://github.com/emilevauge/whoami), a simple web server in Go. +We can now deploy our app on the cluster, here [whoami](https://github.com/containous/whoami), a simple web server in Go. We start 2 services, on the `traefik-net` network. ```shell @@ -115,14 +115,14 @@ docker-machine ssh manager "docker service create \ --name whoami0 \ --label traefik.port=80 \ --network traefik-net \ - emilevauge/whoami" + containous/whoami" docker-machine ssh manager "docker service create \ --name whoami1 \ --label traefik.port=80 \ --network traefik-net \ --label traefik.backend.loadbalancer.sticky=true \ - emilevauge/whoami" + containous/whoami" ``` !!! note @@ -140,8 +140,8 @@ docker-machine ssh manager "docker service ls" ``` ID NAME MODE REPLICAS IMAGE PORTS moq3dq4xqv6t traefik replicated 1/1 traefik:latest *:80->80/tcp,*:8080->8080/tcp -ysil6oto1wim whoami0 replicated 1/1 emilevauge/whoami:latest -z9re2mnl34k4 whoami1 replicated 1/1 emilevauge/whoami:latest +ysil6oto1wim whoami0 replicated 1/1 containous/whoami:latest +z9re2mnl34k4 whoami1 replicated 1/1 containous/whoami:latest ``` @@ -243,8 +243,8 @@ docker-machine ssh manager "docker service ls" ``` ID NAME MODE REPLICAS IMAGE PORTS moq3dq4xqv6t traefik replicated 1/1 traefik:latest *:80->80/tcp,*:8080->8080/tcp -ysil6oto1wim whoami0 replicated 5/5 emilevauge/whoami:latest -z9re2mnl34k4 whoami1 replicated 5/5 emilevauge/whoami:latest +ysil6oto1wim whoami0 replicated 5/5 containous/whoami:latest +z9re2mnl34k4 whoami1 replicated 5/5 containous/whoami:latest ``` ## Access to your `whoami0` through Traefik multiple times. 
diff --git a/docs/user-guide/swarm.md b/docs/user-guide/swarm.md index e08c6ff08..68f720415 100644 --- a/docs/user-guide/swarm.md +++ b/docs/user-guide/swarm.md @@ -112,12 +112,12 @@ Let's explain this command: ## Deploy your apps -We can now deploy our app on the cluster, here [whoami](https://github.com/emilevauge/whoami), a simple web server in GO, on the network `my-net`: +We can now deploy our app on the cluster, here [whoami](https://github.com/containous/whoami), a simple web server in GO, on the network `my-net`: ```shell eval $(docker-machine env --swarm mhs-demo0) -docker run -d --name=whoami0 --net=my-net --env="constraint:node==mhs-demo0" emilevauge/whoami -docker run -d --name=whoami1 --net=my-net --env="constraint:node==mhs-demo1" emilevauge/whoami +docker run -d --name=whoami0 --net=my-net --env="constraint:node==mhs-demo0" containous/whoami +docker run -d --name=whoami1 --net=my-net --env="constraint:node==mhs-demo1" containous/whoami ``` Check that everything is started: @@ -127,8 +127,8 @@ docker ps ``` ``` CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -ba2c21488299 emilevauge/whoami "/whoamI" 8 seconds ago Up 9 seconds 80/tcp mhs-demo1/whoami1 -8147a7746e7a emilevauge/whoami "/whoamI" 19 seconds ago Up 20 seconds 80/tcp mhs-demo0/whoami0 +ba2c21488299 containous/whoami "/whoamI" 8 seconds ago Up 9 seconds 80/tcp mhs-demo1/whoami1 +8147a7746e7a containous/whoami "/whoamI" 19 seconds ago Up 20 seconds 80/tcp mhs-demo0/whoami0 8fbc39271b4c traefik "/traefik -l DEBUG -c" 36 seconds ago Up 37 seconds 192.168.99.101:80->80/tcp, 192.168.99.101:8080->8080/tcp mhs-demo0/serene_bhabha ``` From 74dc5b1c5818175800e68340ba73b163de0c318a Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Thu, 25 Oct 2018 17:38:04 +0200 Subject: [PATCH 18/29] Support custom DNS resolvers for Let's Encrypt. 
--- acme/acme.go | 3 +++ cmd/traefik/traefik.go | 1 + docs/configuration/acme.md | 21 +++++++++++++++++ provider/acme/provider.go | 48 ++++++++++++++++++++++++++++++++++---- types/dns_resolvers.go | 44 ++++++++++++++++++++++++++++++++++ 5 files changed, 113 insertions(+), 4 deletions(-) create mode 100644 types/dns_resolvers.go diff --git a/acme/acme.go b/acme/acme.go index 73f20b8ae..a740edacd 100644 --- a/acme/acme.go +++ b/acme/acme.go @@ -451,6 +451,9 @@ func (a *ACME) buildACMEClient(account *Account) (*acme.Client, error) { return nil, err } + acmeprovider.SetRecursiveNameServers(a.DNSChallenge.Resolvers) + acmeprovider.SetPropagationCheck(a.DNSChallenge.DisablePropagationCheck) + var provider acme.ChallengeProvider provider, err = dns.NewDNSChallengeProviderByName(a.DNSChallenge.Provider) if err != nil { diff --git a/cmd/traefik/traefik.go b/cmd/traefik/traefik.go index b2bbfc415..5f851af5f 100644 --- a/cmd/traefik/traefik.go +++ b/cmd/traefik/traefik.go @@ -71,6 +71,7 @@ Complete documentation is available at https://traefik.io`, f.AddParser(reflect.TypeOf(kubernetes.Namespaces{}), &kubernetes.Namespaces{}) f.AddParser(reflect.TypeOf(ecs.Clusters{}), &ecs.Clusters{}) f.AddParser(reflect.TypeOf([]types.Domain{}), &types.Domains{}) + f.AddParser(reflect.TypeOf(types.DNSResolvers{}), &types.DNSResolvers{}) f.AddParser(reflect.TypeOf(types.Buckets{}), &types.Buckets{}) f.AddParser(reflect.TypeOf(types.StatusCodes{}), &types.StatusCodes{}) f.AddParser(reflect.TypeOf(types.FieldNames{}), &types.FieldNames{}) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index cd2d6bf78..b70c36936 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -142,6 +142,23 @@ entryPoint = "https" # # delayBeforeCheck = 0 + # Use following DNS servers to resolve the FQDN authority. 
+ # + # Optional + # Default: empty + # + # resolvers = ["1.1.1.1:53", "8.8.8.8:53"] + + # Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. + # + # NOT RECOMMENDED: + # Increase the risk of reaching Let's Encrypt's rate limits. + # + # Optional + # Default: false + # + # disablePropagationCheck = true + # Domains list. # Only domains defined here can generate wildcard certificates. # The certificates for these domains are negotiated at traefik startup only. @@ -302,6 +319,10 @@ Here is a list of supported `provider`s, that can automate the DNS verification, | [VegaDNS](https://github.com/shupp/VegaDNS-API) | `vegadns` | `SECRET_VEGADNS_KEY`, `SECRET_VEGADNS_SECRET`, `VEGADNS_URL` | Not tested yet | | [VULTR](https://www.vultr.com) | `vultr` | `VULTR_API_KEY` | Not tested yet | +#### `resolvers` + +Use custom DNS servers to resolve the FQDN authority. + ### `domains` You can provide SANs (alternative domains) to each main domain. diff --git a/provider/acme/provider.go b/provider/acme/provider.go index 349ae2619..99fddeaa0 100644 --- a/provider/acme/provider.go +++ b/provider/acme/provider.go @@ -6,6 +6,7 @@ import ( "fmt" "io/ioutil" fmtlog "log" + "net" "net/url" "reflect" "strings" @@ -74,10 +75,12 @@ type Certificate struct { // DNSChallenge contains DNS challenge Configuration type DNSChallenge struct { - Provider string `description:"Use a DNS-01 based challenge provider rather than HTTPS."` - DelayBeforeCheck flaeg.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."` - preCheckTimeout time.Duration - preCheckInterval time.Duration + Provider string `description:"Use a DNS-01 based challenge provider rather than HTTPS."` + DelayBeforeCheck flaeg.Duration `description:"Assume DNS propagates after a delay in seconds rather than finding and querying nameservers."` + Resolvers types.DNSResolvers `description:"Use following DNS servers to resolve the FQDN 
authority."` + DisablePropagationCheck bool `description:"Disable the DNS propagation checks before notifying ACME that the DNS challenge is ready. [not recommended]"` + preCheckTimeout time.Duration + preCheckInterval time.Duration } // HTTPChallenge contains HTTP challenge Configuration @@ -252,6 +255,9 @@ func (p *Provider) getClient() (*acme.Client, error) { if p.DNSChallenge != nil && len(p.DNSChallenge.Provider) > 0 { log.Debugf("Using DNS Challenge provider: %s", p.DNSChallenge.Provider) + SetRecursiveNameServers(p.DNSChallenge.Resolvers) + SetPropagationCheck(p.DNSChallenge.DisablePropagationCheck) + err = dnsOverrideDelay(p.DNSChallenge.DelayBeforeCheck) if err != nil { return nil, err @@ -784,3 +790,37 @@ func isDomainAlreadyChecked(domainToCheck string, existentDomains []string) bool } return false } + +// SetPropagationCheck to disable the Lego PreCheck. +func SetPropagationCheck(disable bool) { + if disable { + acme.PreCheckDNS = func(_, _ string) (bool, error) { + return true, nil + } + } +} + +// SetRecursiveNameServers to provide a custom DNS resolver. 
+func SetRecursiveNameServers(dnsResolvers []string) { + resolvers := normaliseDNSResolvers(dnsResolvers) + if len(resolvers) > 0 { + acme.RecursiveNameservers = resolvers + log.Infof("Validating FQDN authority with DNS using %+v", resolvers) + } +} + +// ensure all servers have a port number +func normaliseDNSResolvers(dnsResolvers []string) []string { + var normalisedResolvers []string + for _, server := range dnsResolvers { + srv := strings.TrimSpace(server) + if len(srv) > 0 { + if host, port, err := net.SplitHostPort(srv); err != nil { + normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(srv, "53")) + } else { + normalisedResolvers = append(normalisedResolvers, net.JoinHostPort(host, port)) + } + } + } + return normalisedResolvers +} diff --git a/types/dns_resolvers.go b/types/dns_resolvers.go new file mode 100644 index 000000000..dd96f7895 --- /dev/null +++ b/types/dns_resolvers.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "strings" +) + +// DNSResolvers is a list of DNSes that we will try to resolve the challenged FQDN against +type DNSResolvers []string + +// String is the method to format the flag's value, part of the flag.Value interface. +// The String method's output will be used in diagnostics. +func (r *DNSResolvers) String() string { + return strings.Join(*r, ",") +} + +// Set is the method to set the flag value, part of the flag.Value interface. +// Set's argument is a string to be parsed to set the flag. +// It's a comma-separated list, so we split it. 
+func (r *DNSResolvers) Set(value string) error { + entryPoints := strings.Split(value, ",") + if len(entryPoints) == 0 { + return fmt.Errorf("wrong DNSResolvers format: %s", value) + } + for _, entryPoint := range entryPoints { + *r = append(*r, entryPoint) + } + return nil +} + +// Get return the DNSResolvers list +func (r *DNSResolvers) Get() interface{} { + return *r +} + +// SetValue sets the DNSResolvers list +func (r *DNSResolvers) SetValue(val interface{}) { + *r = val.(DNSResolvers) +} + +// Type is type of the struct +func (r *DNSResolvers) Type() string { + return "dnsresolvers" +} From 55334b20628b492190705a32f2ee24f8724ef918 Mon Sep 17 00:00:00 2001 From: Brendan LE GLAUNEC Date: Thu, 25 Oct 2018 18:00:05 +0200 Subject: [PATCH 19/29] Fix display of client username field --- integration/access_log_test.go | 52 ++++++++++++++++++++-- middlewares/accesslog/logger.go | 14 +++--- middlewares/accesslog/logger_test.go | 2 +- middlewares/accesslog/save_username.go | 60 ++++++++++++++++++++++++++ middlewares/auth/authenticator.go | 12 ++++-- server/server_loadbalancer.go | 3 +- server/server_middlewares.go | 6 ++- 7 files changed, 130 insertions(+), 19 deletions(-) create mode 100644 middlewares/accesslog/save_username.go diff --git a/integration/access_log_test.go b/integration/access_log_test.go index 27da9adb8..46f4de648 100644 --- a/integration/access_log_test.go +++ b/integration/access_log_test.go @@ -49,7 +49,6 @@ func (s *AccessLogSuite) TearDownTest(c *check.C) { } func (s *AccessLogSuite) TestAccessLog(c *check.C) { - // Ensure working directory is clean ensureWorkingDirectoryIsClean() // Start Traefik @@ -94,7 +93,6 @@ func (s *AccessLogSuite) TestAccessLog(c *check.C) { } func (s *AccessLogSuite) TestAccessLogAuthFrontend(c *check.C) { - // Ensure working directory is clean ensureWorkingDirectoryIsClean() expected := []accessLogValue{ @@ -142,7 +140,6 @@ func (s *AccessLogSuite) TestAccessLogAuthFrontend(c *check.C) { } func (s *AccessLogSuite) 
TestAccessLogAuthEntrypoint(c *check.C) { - // Ensure working directory is clean ensureWorkingDirectoryIsClean() expected := []accessLogValue{ @@ -190,7 +187,6 @@ func (s *AccessLogSuite) TestAccessLogAuthEntrypoint(c *check.C) { } func (s *AccessLogSuite) TestAccessLogAuthEntrypointSuccess(c *check.C) { - // Ensure working directory is clean ensureWorkingDirectoryIsClean() expected := []accessLogValue{ @@ -642,6 +638,54 @@ func (s *AccessLogSuite) TestAccessLogFrontendWhitelist(c *check.C) { checkNoOtherTraefikProblems(c) } +func (s *AccessLogSuite) TestAccessLogAuthFrontendSuccess(c *check.C) { + ensureWorkingDirectoryIsClean() + + expected := []accessLogValue{ + { + formatOnly: false, + code: "200", + user: "test", + frontendName: "Host-frontend-auth-docker", + backendURL: "http://172.17.0", + }, + } + + // Start Traefik + cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml")) + defer display(c) + + err := cmd.Start() + c.Assert(err, checker.IsNil) + defer cmd.Process.Kill() + + checkStatsForLogFile(c) + + s.composeProject.Container(c, "authFrontend") + + waitForTraefik(c, "authFrontend") + + // Verify Traefik started OK + checkTraefikStarted(c) + + // Test auth entrypoint + req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8006/", nil) + c.Assert(err, checker.IsNil) + req.Host = "frontend.auth.docker.local" + req.SetBasicAuth("test", "test") + + err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody()) + c.Assert(err, checker.IsNil) + + // Verify access.log output as expected + count := checkAccessLogExactValuesOutput(c, expected) + + c.Assert(count, checker.GreaterOrEqualThan, len(expected)) + + // Verify no other Traefik problems + checkNoOtherTraefikProblems(c) +} + func checkNoOtherTraefikProblems(c *check.C) { traefikLog, err := ioutil.ReadFile(traefikTestLogFile) c.Assert(err, checker.IsNil) diff --git a/middlewares/accesslog/logger.go b/middlewares/accesslog/logger.go index 
166bb2bdf..c1a518cad 100644 --- a/middlewares/accesslog/logger.go +++ b/middlewares/accesslog/logger.go @@ -180,7 +180,7 @@ func (l *LogHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next h next.ServeHTTP(crw, reqWithDataTable) - core[ClientUsername] = usernameIfPresent(reqWithDataTable.URL) + core[ClientUsername] = formatUsernameForLog(core[ClientUsername]) logDataTable.DownstreamResponse = crw.Header() @@ -231,14 +231,12 @@ func silentSplitHostPort(value string) (host string, port string) { return host, port } -func usernameIfPresent(theURL *url.URL) string { - username := "-" - if theURL.User != nil { - if name := theURL.User.Username(); name != "" { - username = name - } +func formatUsernameForLog(usernameField interface{}) string { + username, ok := usernameField.(string) + if ok && len(username) != 0 { + return username } - return username + return "-" } // Logging handler to log frontend name, backend name, and elapsed time diff --git a/middlewares/accesslog/logger_test.go b/middlewares/accesslog/logger_test.go index cbf1da5fc..e7c8af991 100644 --- a/middlewares/accesslog/logger_test.go +++ b/middlewares/accesslog/logger_test.go @@ -619,7 +619,6 @@ func doLogging(t *testing.T, config *types.AccessLog) { Method: testMethod, RemoteAddr: fmt.Sprintf("%s:%d", testHostname, testPort), URL: &url.URL{ - User: url.UserPassword(testUsername, ""), Path: testPath, }, } @@ -639,4 +638,5 @@ func logWriterTestHandlerFunc(rw http.ResponseWriter, r *http.Request) { logDataTable.Core[RetryAttempts] = testRetryAttempts logDataTable.Core[StartUTC] = testStart.UTC() logDataTable.Core[StartLocal] = testStart.Local() + logDataTable.Core[ClientUsername] = testUsername } diff --git a/middlewares/accesslog/save_username.go b/middlewares/accesslog/save_username.go new file mode 100644 index 000000000..6debf7795 --- /dev/null +++ b/middlewares/accesslog/save_username.go @@ -0,0 +1,60 @@ +package accesslog + +import ( + "context" + "net/http" + + 
"github.com/urfave/negroni" +) + +const ( + clientUsernameKey key = "ClientUsername" +) + +// SaveUsername sends the Username name to the access logger. +type SaveUsername struct { + next http.Handler +} + +// NewSaveUsername creates a SaveUsername handler. +func NewSaveUsername(next http.Handler) http.Handler { + return &SaveUsername{next} +} + +func (sf *SaveUsername) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + serveSaveUsername(r, func() { + sf.next.ServeHTTP(rw, r) + }) +} + +// SaveNegroniUsername adds the Username to the access logger data table. +type SaveNegroniUsername struct { + next negroni.Handler +} + +// NewSaveNegroniUsername creates a SaveNegroniUsername handler. +func NewSaveNegroniUsername(next negroni.Handler) negroni.Handler { + return &SaveNegroniUsername{next} +} + +func (sf *SaveNegroniUsername) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { + serveSaveUsername(r, func() { + sf.next.ServeHTTP(rw, r, next) + }) +} + +func serveSaveUsername(r *http.Request, apply func()) { + table := GetLogDataTable(r) + + username, ok := r.Context().Value(clientUsernameKey).(string) + if ok { + table.Core[ClientUsername] = username + } + + apply() +} + +// WithUserName adds a username to a requests' context +func WithUserName(req *http.Request, username string) *http.Request { + return req.WithContext(context.WithValue(req.Context(), clientUsernameKey, username)) +} diff --git a/middlewares/auth/authenticator.go b/middlewares/auth/authenticator.go index ea223049e..8e2491605 100644 --- a/middlewares/auth/authenticator.go +++ b/middlewares/auth/authenticator.go @@ -4,11 +4,11 @@ import ( "fmt" "io/ioutil" "net/http" - "net/url" "strings" goauth "github.com/abbot/go-http-auth" "github.com/containous/traefik/log" + "github.com/containous/traefik/middlewares/accesslog" "github.com/containous/traefik/middlewares/tracing" "github.com/containous/traefik/types" "github.com/urfave/negroni" @@ -86,7 +86,10 @@ func 
createAuthDigestHandler(digestAuth *goauth.DigestAuth, authConfig *types.Au digestAuth.RequireAuth(w, r) } else { log.Debugf("Digest auth succeeded") - r.URL.User = url.User(username) + + // set username in request context + r = accesslog.WithUserName(r, username) + if authConfig.HeaderField != "" { r.Header[authConfig.HeaderField] = []string{username} } @@ -105,7 +108,10 @@ func createAuthBasicHandler(basicAuth *goauth.BasicAuth, authConfig *types.Auth) basicAuth.RequireAuth(w, r) } else { log.Debugf("Basic auth succeeded") - r.URL.User = url.User(username) + + // set username in request context + r = accesslog.WithUserName(r, username) + if authConfig.HeaderField != "" { r.Header[authConfig.HeaderField] = []string{username} } diff --git a/server/server_loadbalancer.go b/server/server_loadbalancer.go index 7d7efc6de..f8e8e4726 100644 --- a/server/server_loadbalancer.go +++ b/server/server_loadbalancer.go @@ -115,7 +115,8 @@ func (s *Server) buildLoadBalancer(frontendName string, backendName string, back var saveFrontend http.Handler if s.accessLoggerMiddleware != nil { - saveBackend := accesslog.NewSaveBackend(fwd, backendName) + saveUsername := accesslog.NewSaveUsername(fwd) + saveBackend := accesslog.NewSaveBackend(saveUsername, backendName) saveFrontend = accesslog.NewSaveFrontend(saveBackend, frontendName) rr, _ = roundrobin.New(saveFrontend) } else { diff --git a/server/server_middlewares.go b/server/server_middlewares.go index ae62f993d..4edf25c3d 100644 --- a/server/server_middlewares.go +++ b/server/server_middlewares.go @@ -309,7 +309,8 @@ func buildIPWhiteLister(whiteList *types.WhiteList, wlRange []string) (*middlewa func (s *Server) wrapNegroniHandlerWithAccessLog(handler negroni.Handler, frontendName string) negroni.Handler { if s.accessLoggerMiddleware != nil { - saveBackend := accesslog.NewSaveNegroniBackend(handler, "Traefik") + saveUsername := accesslog.NewSaveNegroniUsername(handler) + saveBackend := accesslog.NewSaveNegroniBackend(saveUsername, 
"Traefik") saveFrontend := accesslog.NewSaveNegroniFrontend(saveBackend, frontendName) return saveFrontend } @@ -318,7 +319,8 @@ func (s *Server) wrapNegroniHandlerWithAccessLog(handler negroni.Handler, fronte func (s *Server) wrapHTTPHandlerWithAccessLog(handler http.Handler, frontendName string) http.Handler { if s.accessLoggerMiddleware != nil { - saveBackend := accesslog.NewSaveBackend(handler, "Traefik") + saveUsername := accesslog.NewSaveUsername(handler) + saveBackend := accesslog.NewSaveBackend(saveUsername, "Traefik") saveFrontend := accesslog.NewSaveFrontend(saveBackend, frontendName) return saveFrontend } From 5e49354bf2b46fe3000c0ea1b609f26934a7cb94 Mon Sep 17 00:00:00 2001 From: Yoan Blanc Date: Mon, 29 Oct 2018 14:20:03 +0100 Subject: [PATCH 20/29] acme: exoscale move from .ch to .com --- docs/configuration/acme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index b70c36936..2ecb97470 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -290,7 +290,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification, | [Duck DNS](https://www.duckdns.org/) | `duckdns` | `DUCKDNS_TOKEN` | No | | [Dyn](https://dyn.com) | `dyn` | `DYN_CUSTOMER_NAME`, `DYN_USER_NAME`, `DYN_PASSWORD` | Not tested yet | | External Program | `exec` | `EXEC_PATH` | Not tested yet | -| [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | +| [Exoscale](https://www.exoscale.com) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | | [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet | | [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet | | [Gandi v5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES | From 
bc2cba5aa434b232821b16c66df3deb980382561 Mon Sep 17 00:00:00 2001 From: mwvdev Date: Mon, 29 Oct 2018 14:44:03 +0100 Subject: [PATCH 21/29] Removed unused imports --- webui/src/app/charts/bar-chart/bar-chart.component.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui/src/app/charts/bar-chart/bar-chart.component.ts b/webui/src/app/charts/bar-chart/bar-chart.component.ts index cc77ddfec..fb695fefb 100644 --- a/webui/src/app/charts/bar-chart/bar-chart.component.ts +++ b/webui/src/app/charts/bar-chart/bar-chart.component.ts @@ -1,5 +1,5 @@ import { Component, ElementRef, Input, OnChanges, OnInit, SimpleChanges } from '@angular/core'; -import { axisBottom, axisLeft, easeLinear, max, min, scaleBand, scaleLinear, select } from 'd3'; +import { axisBottom, axisLeft, max, scaleBand, scaleLinear, select } from 'd3'; import { format } from 'd3-format'; import * as _ from 'lodash'; import { WindowService } from '../../services/window.service'; From 21c94141baded0237c01455ab3ff67e04c0719e9 Mon Sep 17 00:00:00 2001 From: Konovalov Nikolay Date: Mon, 29 Oct 2018 16:58:03 +0300 Subject: [PATCH 22/29] Update docs/configuration/acme.md --- docs/configuration/acme.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index 2ecb97470..1aaa51d05 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -370,7 +370,7 @@ It is not possible to request a double wildcard certificate for a domain (for ex Due to ACME limitation it is not possible to define wildcards in SANs (alternative domains). Thus, the wildcard domain has to be defined as a main domain. Most likely the root domain should receive a certificate too, so it needs to be specified as SAN and 2 `DNS-01` challenges are executed. In this case the generated DNS TXT record for both domains is the same. 
-Eventhough this behaviour is [DNS RFC](https://community.letsencrypt.org/t/wildcard-issuance-two-txt-records-for-the-same-name/54528/2) compliant, it can lead to problems as all DNS providers keep DNS records cached for a certain time (TTL) and this TTL can be superior to the challenge timeout making the `DNS-01` challenge fail. +Even though this behaviour is [DNS RFC](https://community.letsencrypt.org/t/wildcard-issuance-two-txt-records-for-the-same-name/54528/2) compliant, it can lead to problems as all DNS providers keep DNS records cached for a certain time (TTL) and this TTL can be superior to the challenge timeout making the `DNS-01` challenge fail. The Traefik ACME client library [LEGO](https://github.com/xenolf/lego) supports some but not all DNS providers to work around this issue. The [`provider` table](/configuration/acme/#provider) indicates if they allow generating certificates for a wildcard domain and its root domain. From 7eeecd23ac16e0cdf2513ce7db3d491a9615a926 Mon Sep 17 00:00:00 2001 From: SALLEYRON Julien Date: Mon, 29 Oct 2018 15:30:04 +0100 Subject: [PATCH 23/29] Provider docker shutdown problem --- provider/docker/docker.go | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/provider/docker/docker.go b/provider/docker/docker.go index 1da8e0e05..fc7bb84a1 100644 --- a/provider/docker/docker.go +++ b/provider/docker/docker.go @@ -121,18 +121,17 @@ func (p *Provider) createClient() (client.APIClient, error) { // Provide allows the docker provider to provide configurations to traefik // using the given configuration channel. 
func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - // TODO register this routine in pool, and watch for stop channel - safe.Go(func() { + pool.GoCtx(func(routineCtx context.Context) { operation := func() error { var err error - + ctx, cancel := context.WithCancel(routineCtx) + defer cancel() dockerClient, err := p.createClient() if err != nil { log.Errorf("Failed to create a client for docker, error: %s", err) return err } - ctx := context.Background() serverVersion, err := dockerClient.ServerVersion(ctx) if err != nil { log.Errorf("Failed to retrieve information of the docker client and server host: %s", err) @@ -160,12 +159,11 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s Configuration: configuration, } if p.Watch { - ctx, cancel := context.WithCancel(ctx) if p.SwarmMode { errChan := make(chan error) // TODO: This need to be change. Linked to Swarm events docker/docker#23827 ticker := time.NewTicker(SwarmDefaultWatchTime) - pool.Go(func(stop chan bool) { + pool.GoCtx(func(ctx context.Context) { defer close(errChan) for { select { @@ -184,9 +182,8 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s } } - case <-stop: + case <-ctx.Done(): ticker.Stop() - cancel() return } } @@ -197,10 +194,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s // channel closed } else { - pool.Go(func(stop chan bool) { - <-stop - cancel() - }) f := filters.NewArgs() f.Add("type", "container") options := dockertypes.EventsOptions{ @@ -213,7 +206,6 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s if err != nil { log.Errorf("Failed to list containers for docker, error %s", err) // Call cancel to get out of the monitor - cancel() return } configuration := p.buildConfiguration(containers) @@ -238,8 +230,9 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s if err == io.EOF 
{ log.Debug("Provider event stream closed") } - return err + case <-ctx.Done(): + return nil } } } @@ -249,7 +242,7 @@ func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *s notify := func(err error, time time.Duration) { log.Errorf("Provider connection error %+v, retrying in %s", err, time) } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) + err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), routineCtx), notify) if err != nil { log.Errorf("Cannot connect to docker server %+v", err) } From 450471d30a062ca21cc595ef6a72fcfa8751fc5a Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Doumenjou Date: Mon, 29 Oct 2018 16:02:06 +0100 Subject: [PATCH 24/29] Add the missing pass-client-tls annotation to the kubernetes provider --- autogen/gentemplates/gen.go | 22 +++++ docs/configuration/backends/kubernetes.md | 86 ++++++++++++------- provider/kubernetes/annotations.go | 3 +- .../kubernetes/builder_configuration_test.go | 22 +++++ provider/kubernetes/kubernetes.go | 70 +++++++++------ provider/kubernetes/kubernetes_test.go | 23 +++-- templates/kubernetes.tmpl | 22 +++++ 7 files changed, 183 insertions(+), 65 deletions(-) diff --git a/autogen/gentemplates/gen.go b/autogen/gentemplates/gen.go index bc64f14f7..057cb3e52 100644 --- a/autogen/gentemplates/gen.go +++ b/autogen/gentemplates/gen.go @@ -1379,6 +1379,28 @@ var _templatesKubernetesTmpl = []byte(`[backends] {{end}} {{end}} + {{if $frontend.PassTLSClientCert }} + [frontends."{{ $frontendName }}".passTLSClientCert] + pem = {{ $frontend.PassTLSClientCert.PEM }} + {{ $infos := $frontend.PassTLSClientCert.Infos }} + {{if $infos }} + [frontends."{{ $frontendName }}".passTLSClientCert.infos] + notAfter = {{ $infos.NotAfter }} + notBefore = {{ $infos.NotBefore }} + sans = {{ $infos.Sans }} + {{ $subject := $infos.Subject }} + {{if $subject }} + 
[frontends."{{ $frontendName }}".passTLSClientCert.infos.subject] + country = {{ $subject.Country }} + province = {{ $subject.Province }} + locality = {{ $subject.Locality }} + organization = {{ $subject.Organization }} + commonName = {{ $subject.CommonName }} + serialNumber = {{ $subject.SerialNumber }} + {{end}} + {{end}} + {{end}} + {{if $frontend.Headers }} [frontends."{{ $frontendName }}".headers] SSLRedirect = {{ $frontend.Headers.SSLRedirect }} diff --git a/docs/configuration/backends/kubernetes.md b/docs/configuration/backends/kubernetes.md index 962cc3225..eb526993e 100644 --- a/docs/configuration/backends/kubernetes.md +++ b/docs/configuration/backends/kubernetes.md @@ -146,29 +146,35 @@ If either of those configuration options exist, then the backend communication p The following general annotations are applicable on the Ingress object: -| Annotation | Description | -|---------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `traefik.ingress.kubernetes.io/error-pages: ` | (1) See [custom error pages](/configuration/commons/#custom-error-pages) section. | -| `traefik.ingress.kubernetes.io/frontend-entry-points: http,https` | Override the default frontend endpoints. | -| `traefik.ingress.kubernetes.io/pass-tls-cert: "true"` | Override the default frontend PassTLSCert value. Default: `false`. | -| `traefik.ingress.kubernetes.io/preserve-host: "true"` | Forward client `Host` header to the backend. | -| `traefik.ingress.kubernetes.io/priority: "3"` | Override the default frontend rule priority. | -| `traefik.ingress.kubernetes.io/rate-limit: ` | (2) See [rate limiting](/configuration/commons/#rate-limiting) section. | -| `traefik.ingress.kubernetes.io/redirect-entry-point: https` | Enables Redirect to another entryPoint for that frontend (e.g. 
HTTPS). | -| `traefik.ingress.kubernetes.io/redirect-permanent: "true"` | Return 301 instead of 302. | -| `traefik.ingress.kubernetes.io/redirect-regex: ^http://localhost/(.*)` | Redirect to another URL for that frontend. Must be set with `traefik.ingress.kubernetes.io/redirect-replacement`. | -| `traefik.ingress.kubernetes.io/redirect-replacement: http://mydomain/$1` | Redirect to another URL for that frontend. Must be set with `traefik.ingress.kubernetes.io/redirect-regex`. | -| `traefik.ingress.kubernetes.io/rewrite-target: /users` | Replaces each matched Ingress path with the specified one, and adds the old path to the `X-Replaced-Path` header. | -| `traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip` | Overrides the default frontend rule type. Only path-related matchers can be specified [(`Path`, `PathPrefix`, `PathStrip`, `PathPrefixStrip`)](/basics/#path-matcher-usage-guidelines). | -| `traefik.ingress.kubernetes.io/request-modifier: AddPrefix: /users` | Adds a [request modifier](/basics/#modifiers) to the backend request. | -| `traefik.ingress.kubernetes.io/whitelist-source-range: "1.2.3.0/24, fe80::/16"` | A comma-separated list of IP ranges permitted for access (6). | -| `ingress.kubernetes.io/whitelist-x-forwarded-for: "true"` | Use `X-Forwarded-For` header as valid source of IP for the white list. | -| `traefik.ingress.kubernetes.io/app-root: "/index.html"` | Redirects all requests for `/` to the defined path. (4) | -| `traefik.ingress.kubernetes.io/service-weights: ` | Set ingress backend weights specified as percentage or decimal numbers in YAML. (5) -| `ingress.kubernetes.io/protocol: ` | Set the protocol Traefik will use to communicate with pods. 
+| Annotation | Description | +|---------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `traefik.ingress.kubernetes.io/app-root: "/index.html"` | Redirects all requests for `/` to the defined path. (1) | +| `traefik.ingress.kubernetes.io/error-pages: ` | See [custom error pages](/configuration/commons/#custom-error-pages) section. (2) | +| `traefik.ingress.kubernetes.io/frontend-entry-points: http,https` | Override the default frontend endpoints. | +| `traefik.ingress.kubernetes.io/pass-client-tls-cert: ` | Forward the client certificate following the configuration in YAML. (3) | +| `traefik.ingress.kubernetes.io/pass-tls-cert: "true"` | Override the default frontend PassTLSCert value. Default: `false`.(DEPRECATED) | +| `traefik.ingress.kubernetes.io/preserve-host: "true"` | Forward client `Host` header to the backend. | +| `traefik.ingress.kubernetes.io/priority: "3"` | Override the default frontend rule priority. | +| `traefik.ingress.kubernetes.io/rate-limit: ` | See [rate limiting](/configuration/commons/#rate-limiting) section. (4) | +| `traefik.ingress.kubernetes.io/redirect-entry-point: https` | Enables Redirect to another entryPoint for that frontend (e.g. HTTPS). | +| `traefik.ingress.kubernetes.io/redirect-permanent: "true"` | Return 301 instead of 302. | +| `traefik.ingress.kubernetes.io/redirect-regex: ^http://localhost/(.*)` | Redirect to another URL for that frontend. Must be set with `traefik.ingress.kubernetes.io/redirect-replacement`. | +| `traefik.ingress.kubernetes.io/redirect-replacement: http://mydomain/$1` | Redirect to another URL for that frontend. Must be set with `traefik.ingress.kubernetes.io/redirect-regex`. 
| +| `traefik.ingress.kubernetes.io/request-modifier: AddPrefix: /users` | Adds a [request modifier](/basics/#modifiers) to the backend request. | +| `traefik.ingress.kubernetes.io/rewrite-target: /users` | Replaces each matched Ingress path with the specified one, and adds the old path to the `X-Replaced-Path` header. | +| `traefik.ingress.kubernetes.io/rule-type: PathPrefixStrip` | Overrides the default frontend rule type. Only path-related matchers can be specified [(`Path`, `PathPrefix`, `PathStrip`, `PathPrefixStrip`)](/basics/#path-matcher-usage-guidelines).(5) | +| `traefik.ingress.kubernetes.io/service-weights: ` | Set ingress backend weights specified as percentage or decimal numbers in YAML. (6) | +| `traefik.ingress.kubernetes.io/whitelist-source-range: "1.2.3.0/24, fe80::/16"` | A comma-separated list of IP ranges permitted for access (7). | +| `ingress.kubernetes.io/whitelist-x-forwarded-for: "true"` | Use `X-Forwarded-For` header as valid source of IP for the white list. | +| `ingress.kubernetes.io/protocol: ` | Set the protocol Traefik will use to communicate with pods. | +<1> `traefik.ingress.kubernetes.io/app-root`: +Non-root paths will not be affected by this annotation and handled normally. +This annotation may not be combined with other redirect annotations. +Trying to do so will result in the other redirects being ignored. +This annotation can be used in combination with `traefik.ingress.kubernetes.io/redirect-permanent` to configure whether the `app-root` redirect is a 301 or a 302. 
-<1> `traefik.ingress.kubernetes.io/error-pages` example: +<2> `traefik.ingress.kubernetes.io/error-pages` example: ```yaml foo: @@ -184,7 +190,31 @@ fii: query: /bir ``` -<2> `traefik.ingress.kubernetes.io/rate-limit` example: +<3> `traefik.ingress.kubernetes.io/pass-client-tls-cert` example: + +```yaml +# add escaped pem in the `X-Forwarded-Tls-Client-Cert` header +pem: true +# add escaped certificate following infos in the `X-Forwarded-Tls-Client-Cert-Infos` header +infos: + notafter: true + notbefore: true + sans: true + subject: + country: true + province: true + locality: true + organization: true + commonname: true + serialnumber: true +``` + +If `pem` is set, it will add a `X-Forwarded-Tls-Client-Cert` header that contains the escaped pem as value. +If at least one flag of the `infos` part is set, it will add a `X-Forwarded-Tls-Client-Cert-Infos` header that contains an escaped string composed of the client certificate data selected by the infos flags. +This infos part is composed like the following example (not escaped): +```Subject="C=FR,ST=SomeState,L=Lyon,O=Cheese,CN=*.cheese.org",NB=1531900816,NA=1563436816,SAN=*.cheese.org,*.cheese.net,cheese.in,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2``` + +<4> `traefik.ingress.kubernetes.io/rate-limit` example: ```yaml extractorfunc: client.ip @@ -199,16 +229,10 @@ rateset: burst: 18 ``` -<3> `traefik.ingress.kubernetes.io/rule-type` +<5> `traefik.ingress.kubernetes.io/rule-type` Note: `ReplacePath` is deprecated in this annotation, use the `traefik.ingress.kubernetes.io/request-modifier` annotation instead. Default: `PathPrefix`. -<4> `traefik.ingress.kubernetes.io/app-root`: -Non-root paths will not be affected by this annotation and handled normally. -This annotation may not be combined with other redirect annotations. -Trying to do so will result in the other redirects being ignored. 
-This annotation can be used in combination with `traefik.ingress.kubernetes.io/redirect-permanent` to configure whether the `app-root` redirect is a 301 or a 302. - -<5> `traefik.ingress.kubernetes.io/service-weights`: +<6> `traefik.ingress.kubernetes.io/service-weights`: Service weights enable to split traffic across multiple backing services in a fine-grained manner. Example: @@ -236,7 +260,7 @@ For each path definition, this annotation will fail if: See also the [user guide section traffic splitting](/user-guide/kubernetes/#traffic-splitting). -<6> `traefik.ingress.kubernetes.io/whitelist-source-range`: +<7> `traefik.ingress.kubernetes.io/whitelist-source-range`: All source IPs are permitted if the list is empty or a single range is ill-formatted. Please note, you may have to set `service.spec.externalTrafficPolicy` to the value `Local` to preserve the source IP of the request for filtering. Please see [this link](https://kubernetes.io/docs/tutorials/services/source-ip/) for more information. 
diff --git a/provider/kubernetes/annotations.go b/provider/kubernetes/annotations.go index 03805a4a7..c1b4abde3 100644 --- a/provider/kubernetes/annotations.go +++ b/provider/kubernetes/annotations.go @@ -22,7 +22,8 @@ const ( annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range" annotationKubernetesWhiteListUseXForwardedFor = "ingress.kubernetes.io/whitelist-x-forwarded-for" annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host" - annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" + annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" // Deprecated + annotationKubernetesPassTLSClientCert = "ingress.kubernetes.io/pass-client-tls-cert" annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points" annotationKubernetesPriority = "ingress.kubernetes.io/priority" annotationKubernetesCircuitBreakerExpression = "ingress.kubernetes.io/circuit-breaker-expression" diff --git a/provider/kubernetes/builder_configuration_test.go b/provider/kubernetes/builder_configuration_test.go index ccb82505c..7ec4a9f94 100644 --- a/provider/kubernetes/builder_configuration_test.go +++ b/provider/kubernetes/builder_configuration_test.go @@ -382,12 +382,34 @@ func limitPeriod(period time.Duration) func(*types.Rate) { } } +// Deprecated func passTLSCert() func(*types.Frontend) { return func(f *types.Frontend) { f.PassTLSCert = true } } +func passTLSClientCert() func(*types.Frontend) { + return func(f *types.Frontend) { + f.PassTLSClientCert = &types.TLSClientHeaders{ + PEM: true, + Infos: &types.TLSClientCertificateInfos{ + NotAfter: true, + NotBefore: true, + Subject: &types.TLSCLientCertificateSubjectInfos{ + Country: true, + Province: true, + Locality: true, + Organization: true, + CommonName: true, + SerialNumber: true, + }, + Sans: true, + }, + } + } +} + func routes(opts ...func(*types.Route) string) func(*types.Frontend) { return func(f *types.Frontend) { f.Routes = 
make(map[string]types.Route) diff --git a/provider/kubernetes/kubernetes.go b/provider/kubernetes/kubernetes.go index 94f8d825d..0aa07d85e 100644 --- a/provider/kubernetes/kubernetes.go +++ b/provider/kubernetes/kubernetes.go @@ -62,7 +62,7 @@ type Provider struct { Token string `description:"Kubernetes bearer token (not needed for in-cluster client)"` CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)"` DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers" export:"true"` - EnablePassTLSCert bool `description:"Kubernetes enable Pass TLS Client Certs" export:"true"` + EnablePassTLSCert bool `description:"Kubernetes enable Pass TLS Client Certs" export:"true"` // Deprecated Namespaces Namespaces `description:"Kubernetes namespaces" export:"true"` LabelSelector string `description:"Kubernetes Ingress label selector to use" export:"true"` IngressClass string `description:"Value of kubernetes.io/ingress.class annotation to watch for" export:"true"` @@ -275,22 +275,23 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error) } passHostHeader := getBoolValue(i.Annotations, annotationKubernetesPreserveHost, !p.DisablePassHostHeaders) - passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) + passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) // Deprecated entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints) frontend = &types.Frontend{ - Backend: baseName, - PassHostHeader: passHostHeader, - PassTLSCert: passTLSCert, - Routes: make(map[string]types.Route), - Priority: priority, - WhiteList: getWhiteList(i), - Redirect: getFrontendRedirect(i, baseName, pa.Path), - EntryPoints: entryPoints, - Headers: getHeader(i), - Errors: getErrorPages(i), - RateLimit: getRateLimit(i), - Auth: auth, + Backend: baseName, + PassHostHeader: 
passHostHeader, + PassTLSCert: passTLSCert, + PassTLSClientCert: getPassTLSClientCert(i), + Routes: make(map[string]types.Route), + Priority: priority, + WhiteList: getWhiteList(i), + Redirect: getFrontendRedirect(i, baseName, pa.Path), + EntryPoints: entryPoints, + Headers: getHeader(i), + Errors: getErrorPages(i), + RateLimit: getRateLimit(i), + Auth: auth, } } @@ -532,22 +533,23 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem } passHostHeader := getBoolValue(i.Annotations, annotationKubernetesPreserveHost, !p.DisablePassHostHeaders) - passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) + passTLSCert := getBoolValue(i.Annotations, annotationKubernetesPassTLSCert, p.EnablePassTLSCert) // Deprecated priority := getIntValue(i.Annotations, annotationKubernetesPriority, 0) entryPoints := getSliceStringValue(i.Annotations, annotationKubernetesFrontendEntryPoints) templateObjects.Frontends[defaultFrontendName] = &types.Frontend{ - Backend: defaultBackendName, - PassHostHeader: passHostHeader, - PassTLSCert: passTLSCert, - Routes: make(map[string]types.Route), - Priority: priority, - WhiteList: getWhiteList(i), - Redirect: getFrontendRedirect(i, defaultFrontendName, "/"), - EntryPoints: entryPoints, - Headers: getHeader(i), - Errors: getErrorPages(i), - RateLimit: getRateLimit(i), + Backend: defaultBackendName, + PassHostHeader: passHostHeader, + PassTLSCert: passTLSCert, + PassTLSClientCert: getPassTLSClientCert(i), + Routes: make(map[string]types.Route), + Priority: priority, + WhiteList: getWhiteList(i), + Redirect: getFrontendRedirect(i, defaultFrontendName, "/"), + EntryPoints: entryPoints, + Headers: getHeader(i), + Errors: getErrorPages(i), + RateLimit: getRateLimit(i), } templateObjects.Frontends[defaultFrontendName].Routes["/"] = types.Route{ @@ -1084,6 +1086,22 @@ func getRateLimit(i *extensionsv1beta1.Ingress) *types.RateLimit { return rateLimit } +func getPassTLSClientCert(i 
*extensionsv1beta1.Ingress) *types.TLSClientHeaders { + var passTLSClientCert *types.TLSClientHeaders + + passRaw := getStringValue(i.Annotations, annotationKubernetesPassTLSClientCert, "") + if len(passRaw) > 0 { + passTLSClientCert = &types.TLSClientHeaders{} + err := yaml.Unmarshal([]byte(passRaw), passTLSClientCert) + if err != nil { + log.Error(err) + return nil + } + } + + return passTLSClientCert +} + func templateSafeString(value string) error { _, err := strconv.Unquote(`"` + value + `"`) return err diff --git a/provider/kubernetes/kubernetes_test.go b/provider/kubernetes/kubernetes_test.go index 4c04ce750..d1aba1ad7 100644 --- a/provider/kubernetes/kubernetes_test.go +++ b/provider/kubernetes/kubernetes_test.go @@ -728,6 +728,7 @@ func TestGetPassHostHeader(t *testing.T) { assert.Equal(t, expected, actual) } +// Deprecated func TestGetPassTLSCert(t *testing.T) { ingresses := []*extensionsv1beta1.Ingress{ buildIngress(iNamespace("awesome"), @@ -1102,6 +1103,20 @@ func TestIngressAnnotations(t *testing.T) { buildIngress( iNamespace("testing"), iAnnotation(annotationKubernetesPassTLSCert, "true"), + iAnnotation(annotationKubernetesPassTLSClientCert, ` +pem: true +infos: + notafter: true + notbefore: true + sans: true + subject: + country: true + province: true + locality: true + organization: true + commonname: true + serialnumber: true +`), iAnnotation(annotationKubernetesIngressClass, traefikDefaultRealm), iRules( iRule( @@ -1500,13 +1515,7 @@ rateset: ), frontend("other/sslstuff", passHostHeader(), - passTLSCert(), - routes( - route("/sslstuff", "PathPrefix:/sslstuff"), - route("other", "Host:other")), - ), - frontend("other/sslstuff", - passHostHeader(), + passTLSClientCert(), passTLSCert(), routes( route("/sslstuff", "PathPrefix:/sslstuff"), diff --git a/templates/kubernetes.tmpl b/templates/kubernetes.tmpl index 57e5e6344..c522ffae5 100644 --- a/templates/kubernetes.tmpl +++ b/templates/kubernetes.tmpl @@ -129,6 +129,28 @@ {{end}} {{end}} + {{if 
$frontend.PassTLSClientCert }} + [frontends."{{ $frontendName }}".passTLSClientCert] + pem = {{ $frontend.PassTLSClientCert.PEM }} + {{ $infos := $frontend.PassTLSClientCert.Infos }} + {{if $infos }} + [frontends."{{ $frontendName }}".passTLSClientCert.infos] + notAfter = {{ $infos.NotAfter }} + notBefore = {{ $infos.NotBefore }} + sans = {{ $infos.Sans }} + {{ $subject := $infos.Subject }} + {{if $subject }} + [frontends."{{ $frontendName }}".passTLSClientCert.infos.subject] + country = {{ $subject.Country }} + province = {{ $subject.Province }} + locality = {{ $subject.Locality }} + organization = {{ $subject.Organization }} + commonName = {{ $subject.CommonName }} + serialNumber = {{ $subject.SerialNumber }} + {{end}} + {{end}} + {{end}} + {{if $frontend.Headers }} [frontends."{{ $frontendName }}".headers] SSLRedirect = {{ $frontend.Headers.SSLRedirect }} From 993caf505885e5bb985cda88c04bcc77d89ece59 Mon Sep 17 00:00:00 2001 From: Brendan LE GLAUNEC Date: Mon, 29 Oct 2018 16:24:04 +0100 Subject: [PATCH 25/29] Fix access log field parsing --- types/logs.go | 10 ++++++++++ types/logs_test.go | 8 ++++++++ 2 files changed, 18 insertions(+) diff --git a/types/logs.go b/types/logs.go index 374d3f700..118de9c5f 100644 --- a/types/logs.go +++ b/types/logs.go @@ -88,6 +88,11 @@ func (f *FieldNames) Get() interface{} { // Set's argument is a string to be parsed to set the flag. // It's a space-separated list, so we split it. func (f *FieldNames) Set(value string) error { + // When arguments are passed through YAML, escaped double quotes + // might be added to this string, and they would break the last + // key/value pair. This ensures the string is clean. + value = strings.Trim(value, "\"") + fields := strings.Fields(value) for _, field := range fields { @@ -123,6 +128,11 @@ func (f *FieldHeaderNames) Get() interface{} { // Set's argument is a string to be parsed to set the flag. // It's a space-separated list, so we split it. 
func (f *FieldHeaderNames) Set(value string) error { + // When arguments are passed through YAML, escaped double quotes + // might be added to this string, and they would break the last + // key/value pair. This ensures the string is clean. + value = strings.Trim(value, "\"") + fields := strings.Fields(value) for _, field := range fields { diff --git a/types/logs_test.go b/types/logs_test.go index 332158ed2..0b1bf8ebc 100644 --- a/types/logs_test.go +++ b/types/logs_test.go @@ -301,6 +301,14 @@ func TestFieldsHeadersNamesSet(t *testing.T) { "X-HEADER-2": "bar", }, }, + { + desc: "Two values separated by space with escaped double quotes should return FieldNames of size 2", + value: "\"X-HEADER-1=foo X-HEADER-2=bar\"", + expected: &FieldHeaderNames{ + "X-HEADER-1": "foo", + "X-HEADER-2": "bar", + }, + }, } for _, test := range testCases { From c6dd1dccc33922dfc7f9022204881ad6bf005d6e Mon Sep 17 00:00:00 2001 From: Manuel Zapf Date: Mon, 29 Oct 2018 16:48:06 +0100 Subject: [PATCH 26/29] add static redirect --- server/server_configuration.go | 24 +++++------------------- server/server_configuration_test.go | 17 ++++++----------- server/server_middlewares.go | 8 ++++++++ server/server_middlewares_test.go | 3 +-- server/server_test.go | 5 +---- 5 files changed, 21 insertions(+), 36 deletions(-) diff --git a/server/server_configuration.go b/server/server_configuration.go index 2646505f8..73cc07eab 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -42,13 +42,7 @@ func (s *Server) loadConfiguration(configMsg types.ConfigMessage) { s.metricsRegistry.ConfigReloadsCounter().Add(1) - newServerEntryPoints, err := s.loadConfig(newConfigurations, s.globalConfiguration) - if err != nil { - s.metricsRegistry.ConfigReloadsFailureCounter().Add(1) - s.metricsRegistry.LastConfigReloadFailureGauge().Set(float64(time.Now().Unix())) - log.Error("Error loading new configuration, aborted ", err) - return - } + newServerEntryPoints := 
s.loadConfig(newConfigurations, s.globalConfiguration) s.metricsRegistry.LastConfigReloadSuccessGauge().Set(float64(time.Now().Unix())) @@ -77,11 +71,7 @@ func (s *Server) loadConfiguration(configMsg types.ConfigMessage) { // loadConfig returns a new gorilla.mux Route from the specified global configuration and the dynamic // provider configurations. -func (s *Server) loadConfig(configurations types.Configurations, globalConfiguration configuration.GlobalConfiguration) (map[string]*serverEntryPoint, error) { - redirectHandlers, err := s.buildEntryPointRedirect() - if err != nil { - return nil, err - } +func (s *Server) loadConfig(configurations types.Configurations, globalConfiguration configuration.GlobalConfiguration) map[string]*serverEntryPoint { serverEntryPoints := s.buildServerEntryPoints() @@ -95,7 +85,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura for _, frontendName := range frontendNames { frontendPostConfigs, err := s.loadFrontendConfig(providerName, frontendName, config, - redirectHandlers, serverEntryPoints, + serverEntryPoints, backendsHandlers, backendsHealthCheck) if err != nil { log.Errorf("%v. 
Skipping frontend %s...", err, frontendName) @@ -128,12 +118,12 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura } } - return serverEntryPoints, err + return serverEntryPoints } func (s *Server) loadFrontendConfig( providerName string, frontendName string, config *types.Configuration, - redirectHandlers map[string]negroni.Handler, serverEntryPoints map[string]*serverEntryPoint, + serverEntryPoints map[string]*serverEntryPoint, backendsHandlers map[string]http.Handler, backendsHealthCheck map[string]*healthcheck.BackendConfig, ) ([]handlerPostConfig, error) { @@ -194,10 +184,6 @@ func (s *Server) loadFrontendConfig( n := negroni.New() - if _, exist := redirectHandlers[entryPointName]; exist { - n.Use(redirectHandlers[entryPointName]) - } - for _, handler := range handlers { n.Use(handler) } diff --git a/server/server_configuration_test.go b/server/server_configuration_test.go index fa945c830..5fbad854e 100644 --- a/server/server_configuration_test.go +++ b/server/server_configuration_test.go @@ -135,8 +135,7 @@ func TestServerLoadConfigHealthCheckOptions(t *testing.T) { srv := NewServer(globalConfig, nil, entryPoints) - _, err := srv.loadConfig(dynamicConfigs, globalConfig) - require.NoError(t, err) + _ = srv.loadConfig(dynamicConfigs, globalConfig) expectedNumHealthCheckBackends := 0 if healthCheck != nil { @@ -187,8 +186,7 @@ func TestServerLoadConfigEmptyBasicAuth(t *testing.T) { } srv := NewServer(globalConfig, nil, entryPoints) - _, err := srv.loadConfig(dynamicConfigs, globalConfig) - require.NoError(t, err) + _ = srv.loadConfig(dynamicConfigs, globalConfig) } func TestServerLoadCertificateWithDefaultEntryPoint(t *testing.T) { @@ -214,9 +212,9 @@ func TestServerLoadCertificateWithDefaultEntryPoint(t *testing.T) { } srv := NewServer(globalConfig, nil, entryPoints) - if mapEntryPoints, err := srv.loadConfig(dynamicConfigs, globalConfig); err != nil { - t.Fatalf("got error: %s", err) - } else if 
!mapEntryPoints["https"].certs.ContainsCertificates() { + + mapEntryPoints := srv.loadConfig(dynamicConfigs, globalConfig) + if !mapEntryPoints["https"].certs.ContainsCertificates() { t.Fatal("got error: https entryPoint must have TLS certificates.") } } @@ -259,10 +257,7 @@ func TestReuseBackend(t *testing.T) { srv := NewServer(globalConfig, nil, entryPoints) - serverEntryPoints, err := srv.loadConfig(dynamicConfigs, globalConfig) - if err != nil { - t.Fatalf("error loading config: %s", err) - } + serverEntryPoints := srv.loadConfig(dynamicConfigs, globalConfig) // Test that the /ok path returns a status 200. responseRecorderOk := &httptest.ResponseRecorder{} diff --git a/server/server_middlewares.go b/server/server_middlewares.go index 4edf25c3d..9a2922b34 100644 --- a/server/server_middlewares.go +++ b/server/server_middlewares.go @@ -163,6 +163,14 @@ func (s *Server) buildServerEntryPointMiddlewares(serverEntryPointName string, s } } + if s.entryPoints[serverEntryPointName].Configuration.Redirect != nil { + redirectHandlers, err := s.buildEntryPointRedirect() + if err != nil { + return nil, fmt.Errorf("failed to create redirect middleware: %v", err) + } + serverMiddlewares = append(serverMiddlewares, redirectHandlers[serverEntryPointName]) + } + if s.entryPoints[serverEntryPointName].Configuration.Auth != nil { authMiddleware, err := mauth.NewAuthenticator(s.entryPoints[serverEntryPointName].Configuration.Auth, s.tracingMiddleware) if err != nil { diff --git a/server/server_middlewares_test.go b/server/server_middlewares_test.go index 06b6a3311..d8552b7b0 100644 --- a/server/server_middlewares_test.go +++ b/server/server_middlewares_test.go @@ -285,6 +285,5 @@ func TestServerGenericFrontendAuthFail(t *testing.T) { srv := NewServer(globalConfig, nil, nil) - _, err := srv.loadConfig(dynamicConfigs, globalConfig) - require.NoError(t, err) + _ = srv.loadConfig(dynamicConfigs, globalConfig) } diff --git a/server/server_test.go b/server/server_test.go index 
f1572002f..50fa497eb 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -333,10 +333,7 @@ func TestServerResponseEmptyBackend(t *testing.T) { dynamicConfigs := types.Configurations{"config": test.config(testServer.URL)} srv := NewServer(globalConfig, nil, entryPointsConfig) - entryPoints, err := srv.loadConfig(dynamicConfigs, globalConfig) - if err != nil { - t.Fatalf("error loading config: %s", err) - } + entryPoints := srv.loadConfig(dynamicConfigs, globalConfig) responseRecorder := &httptest.ResponseRecorder{} request := httptest.NewRequest(http.MethodGet, testServer.URL+requestPath, nil) From e6e9a869194020aac344731459c5ac0ce1b99b32 Mon Sep 17 00:00:00 2001 From: SALLEYRON Julien Date: Mon, 29 Oct 2018 18:42:03 +0100 Subject: [PATCH 27/29] Add flush interval option on backend --- autogen/.placeholder | 0 autogen/gentemplates/gen.go | 49 +++++++++++++ docs/configuration/backends/consulcatalog.md | 1 + docs/configuration/backends/docker.md | 5 +- docs/configuration/backends/ecs.md | 1 + docs/configuration/backends/file.md | 3 + docs/configuration/backends/kubernetes.md | 1 + docs/configuration/backends/marathon.md | 1 + docs/configuration/backends/mesos.md | 1 + docs/configuration/backends/rancher.md | 5 +- .../fixtures/grpc/config_with_flush.toml | 31 ++++++++ integration/grpc_test.go | 61 ++++++++++++++++ provider/consulcatalog/config.go | 1 + provider/consulcatalog/config_test.go | 4 + provider/docker/config.go | 15 ++-- .../docker/config_container_docker_test.go | 4 + .../docker/config_container_swarm_test.go | 4 + provider/ecs/config.go | 18 +++-- provider/ecs/config_test.go | 4 + provider/kubernetes/annotations.go | 73 ++++++++++--------- .../kubernetes/builder_configuration_test.go | 7 ++ provider/kubernetes/kubernetes.go | 13 ++++ provider/kubernetes/kubernetes_test.go | 32 ++++++++ provider/kv/keynames.go | 1 + provider/kv/kv_config.go | 15 ++++ provider/label/names.go | 2 + provider/label/partial.go | 13 ++++ provider/marathon/config.go | 
15 ++-- provider/marathon/config_test.go | 4 + provider/mesos/config.go | 19 ++--- provider/mesos/config_test.go | 4 + provider/rancher/config.go | 13 ++-- provider/rancher/config_test.go | 4 + server/server_configuration.go | 14 +++- templates/consul_catalog.tmpl | 8 ++ templates/docker.tmpl | 6 ++ templates/ecs.tmpl | 6 ++ templates/kubernetes.tmpl | 5 ++ templates/kv.tmpl | 6 ++ templates/marathon.tmpl | 6 ++ templates/mesos.tmpl | 6 ++ templates/rancher.tmpl | 6 ++ types/types.go | 18 +++-- 43 files changed, 420 insertions(+), 85 deletions(-) delete mode 100644 autogen/.placeholder create mode 100644 integration/fixtures/grpc/config_with_flush.toml diff --git a/autogen/.placeholder b/autogen/.placeholder deleted file mode 100644 index e69de29bb..000000000 diff --git a/autogen/gentemplates/gen.go b/autogen/gentemplates/gen.go index 057cb3e52..6346a4be9 100644 --- a/autogen/gentemplates/gen.go +++ b/autogen/gentemplates/gen.go @@ -143,6 +143,14 @@ var _templatesConsul_catalogTmpl = []byte(`[backends] expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $service.TraefikLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + + + {{ $loadBalancer := getLoadBalancer $service.TraefikLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] @@ -620,6 +628,12 @@ var _templatesDockerTmpl = []byte(`{{$backendServers := .Servers}} expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $backend.SegmentLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $backend.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] @@ -948,6 +962,12 @@ var 
_templatesEcsTmpl = []byte(`[backends] expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $firstInstance.SegmentLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $serviceName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $firstInstance.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $serviceName }}".loadBalancer] @@ -1258,6 +1278,11 @@ var _templatesKubernetesTmpl = []byte(`[backends] expression = "{{ $backend.CircuitBreaker.Expression }}" {{end}} + {{if $backend.ResponseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $backend.responseForwarding.FlushInterval }}" + {{end}} + [backends."{{ $backendName }}".loadBalancer] method = "{{ $backend.LoadBalancer.Method }}" sticky = {{ $backend.LoadBalancer.Sticky }} @@ -1492,6 +1517,12 @@ var _templatesKvTmpl = []byte(`[backends] [backends."{{ $backendName }}".circuitBreaker] expression = "{{ $circuitBreaker.Expression }}" {{end}} + + {{ $responseForwarding := getResponseForwarding $backend }} + {{if $responseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.flushInterval }}" + {{end}} {{ $loadBalancer := getLoadBalancer $backend }} {{if $loadBalancer }} @@ -1862,6 +1893,12 @@ var _templatesMarathonTmpl = []byte(`{{ $apps := .Applications }} [backends."{{ $backendName }}".circuitBreaker] expression = "{{ $circuitBreaker.Expression }}" {{end}} + + {{ $responseForwarding := getResponseForwarding $app.SegmentLabels }} + {{if $responseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} {{ $loadBalancer := getLoadBalancer $app.SegmentLabels }} {{if $loadBalancer }} @@ -2177,6 +2214,12 @@ var _templatesMesosTmpl = []byte(`[backends] expression = "{{ $circuitBreaker.Expression }}" 
{{end}} + {{ $responseForwarding := getResponseForwarding $app.TraefikLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $app.TraefikLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] @@ -2545,6 +2588,12 @@ var _templatesRancherTmpl = []byte(`{{ $backendServers := .Backends }} expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $backend.SegmentLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $backend.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] diff --git a/docs/configuration/backends/consulcatalog.md b/docs/configuration/backends/consulcatalog.md index d161eadfc..7dc9248cb 100644 --- a/docs/configuration/backends/consulcatalog.md +++ b/docs/configuration/backends/consulcatalog.md @@ -105,6 +105,7 @@ Additional settings can be defined using Consul Catalog tags. | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. | | `.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend. ex: `NetworkErrorRatio() > 0.` | +| `.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `.backend.healthcheck.interval=1s` | Defines the health check interval. 
| | `.backend.healthcheck.port=8080` | Sets a different port for the health check. | diff --git a/docs/configuration/backends/docker.md b/docs/configuration/backends/docker.md index 3515660b7..e8d133ac2 100644 --- a/docs/configuration/backends/docker.md +++ b/docs/configuration/backends/docker.md @@ -213,9 +213,9 @@ Labels can be used on containers to override default behavior. |---------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | `traefik.docker.network` | Overrides the default docker network to use for connections to the container. [1] | | `traefik.domain` | Sets the default base domain for the frontend rules. For more information, check the [Container Labels section's of the user guide "Let's Encrypt & Docker"](/user-guide/docker-and-lets-encrypt/#container-labels) | -| `traefik.enable=false` | Disables this container in Traefik. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. Useful when the container exposes multiples ports. | -| `traefik.tags=foo,bar,myTag` | Adds Traefik tags to the Docker container/service to be used in [constraints](/configuration/commons/#constraints). | +| `traefik.tags=foo,bar,myTag` | Adds Traefik tags to the Docker container/service to be used in [constraints](/configuration/commons/#constraints). | | `traefik.protocol=https` | Overrides the default `http` protocol | | `traefik.weight=10` | Assigns this weight to the container | | `traefik.backend=foo` | Gives the name `foo` to the generated backend for this container. | @@ -225,6 +225,7 @@ Labels can be used on containers to override default behavior. | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. 
| | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend | +| `traefik.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `traefik.backend.healthcheck.interval=1s` | Defines the health check interval. | | `traefik.backend.healthcheck.port=8080` | Sets a different port for the health check. | diff --git a/docs/configuration/backends/ecs.md b/docs/configuration/backends/ecs.md index 74effb56b..b4d84319c 100644 --- a/docs/configuration/backends/ecs.md +++ b/docs/configuration/backends/ecs.md @@ -150,6 +150,7 @@ Labels can be used on task containers to override default behaviour: | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend | +| `traefik.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `traefik.backend.healthcheck.interval=1s` | Defines the health check interval. (Default: 30s) | | `traefik.backend.healthcheck.scheme=http` | Overrides the server URL scheme. 
| diff --git a/docs/configuration/backends/file.md b/docs/configuration/backends/file.md index 50c0fe496..bd24d9b72 100644 --- a/docs/configuration/backends/file.md +++ b/docs/configuration/backends/file.md @@ -23,6 +23,9 @@ Traefik can be configured with a file. [backends.backend1.circuitBreaker] expression = "NetworkErrorRatio() > 0.5" + + [backends.backend1.responseForwarding] + flushInterval = "10ms" [backends.backend1.loadBalancer] method = "drr" diff --git a/docs/configuration/backends/kubernetes.md b/docs/configuration/backends/kubernetes.md index eb526993e..c38af2bbf 100644 --- a/docs/configuration/backends/kubernetes.md +++ b/docs/configuration/backends/kubernetes.md @@ -277,6 +277,7 @@ The following annotations are applicable on the Service object associated with a | `traefik.backend.loadbalancer.sticky: "true"` | Enable backend sticky sessions (DEPRECATED). | | `traefik.ingress.kubernetes.io/affinity: "true"` | Enable backend sticky sessions. | | `traefik.ingress.kubernetes.io/circuit-breaker-expression: ` | Set the circuit breaker expression for the backend. | +| `traefik.ingress.kubernetes.io/responseforwarding-flushinterval: "10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.ingress.kubernetes.io/load-balancer-method: drr` | Override the default `wrr` load balancer algorithm. | | `traefik.ingress.kubernetes.io/max-conn-amount: "10"` | Sets the maximum number of simultaneous connections to the backend.
Must be used in conjunction with the label below to take effect. | | `traefik.ingress.kubernetes.io/max-conn-extractor-func: client.ip` | Set the function to be used against the request to determine what to limit maximum connections to the backend by.
Must be used in conjunction with the above label to take effect. | diff --git a/docs/configuration/backends/marathon.md b/docs/configuration/backends/marathon.md index 4b7f41397..ef4b20b99 100644 --- a/docs/configuration/backends/marathon.md +++ b/docs/configuration/backends/marathon.md @@ -208,6 +208,7 @@ The following labels can be defined on Marathon applications. They adjust the be | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend | +| `traefik.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `traefik.backend.healthcheck.interval=1s` | Defines the health check interval. (Default: 30s) | | `traefik.backend.healthcheck.port=8080` | Sets a different port for the health check. | diff --git a/docs/configuration/backends/mesos.md b/docs/configuration/backends/mesos.md index 2c2628390..13568bdc8 100644 --- a/docs/configuration/backends/mesos.md +++ b/docs/configuration/backends/mesos.md @@ -122,6 +122,7 @@ The following labels can be defined on Mesos tasks. They adjust the behavior for | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. 
| | `traefik.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend | +| `traefik.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `traefik.backend.healthcheck.interval=1s` | Defines the health check interval. (Default: 30s) | | `traefik.backend.healthcheck.scheme=http` | Overrides the server URL scheme. | diff --git a/docs/configuration/backends/rancher.md b/docs/configuration/backends/rancher.md index f91c4d9b9..9e96c2c2f 100644 --- a/docs/configuration/backends/rancher.md +++ b/docs/configuration/backends/rancher.md @@ -140,8 +140,8 @@ Labels can be used on task containers to override default behavior: | Label | Description | |---------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `traefik.domain` | Sets the default base domain for the frontend rules. | -| `traefik.enable=false` | Disables this container in Traefik. | +| `traefik.domain` | Sets the default base domain for the frontend rules. | +| `traefik.enable=false` | Disables this container in Traefik. | | `traefik.port=80` | Registers this port. Useful when the container exposes multiple ports. | | `traefik.protocol=https` | Overrides the default `http` protocol. | | `traefik.weight=10` | Assigns this weight to the container. | @@ -152,6 +152,7 @@ Labels can be used on task containers to override default behavior: | `traefik.backend.buffering.memResponseBodyBytes=0` | See [buffering](/configuration/commons/#buffering) section. 
| | `traefik.backend.buffering.retryExpression=EXPR` | See [buffering](/configuration/commons/#buffering) section. | | `traefik.backend.circuitbreaker.expression=EXPR` | Creates a [circuit breaker](/basics/#backends) to be used against the backend | +| `traefik.backend.responseForwarding.flushInterval=10ms` | Defines the interval between two flushes when forwarding response from backend to client. | | `traefik.backend.healthcheck.path=/health` | Enables health check for the backend, hitting the container at `path`. | | `traefik.backend.healthcheck.interval=1s` | Defines the health check interval. | | `traefik.backend.healthcheck.port=8080` | Sets a different port for the health check. | diff --git a/integration/fixtures/grpc/config_with_flush.toml b/integration/fixtures/grpc/config_with_flush.toml new file mode 100644 index 000000000..9615e3dd2 --- /dev/null +++ b/integration/fixtures/grpc/config_with_flush.toml @@ -0,0 +1,31 @@ +defaultEntryPoints = ["https"] + +rootCAs = [ """{{ .CertContent }}""" ] + +[entryPoints] + [entryPoints.https] + address = ":4443" + [entryPoints.https.tls] + [[entryPoints.https.tls.certificates]] + certFile = """{{ .CertContent }}""" + keyFile = """{{ .KeyContent }}""" + + +[api] + +[file] + +[backends] + [backends.backend1] + [backends.backend1.responseForwarding] + flushInterval="1ms" + [backends.backend1.servers.server1] + url = "https://127.0.0.1:{{ .GRPCServerPort }}" + weight = 1 + + +[frontends] + [frontends.frontend1] + backend = "backend1" + [frontends.frontend1.routes.test_1] + rule = "Host:127.0.0.1" diff --git a/integration/grpc_test.go b/integration/grpc_test.go index 8ec9813a0..d26f8a502 100644 --- a/integration/grpc_test.go +++ b/integration/grpc_test.go @@ -356,3 +356,64 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) { }) c.Assert(err, check.IsNil) } + +func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) { + stopStreamExample := make(chan bool) + defer func() { stopStreamExample <- true }() + lis, err := 
net.Listen("tcp", ":0") + c.Assert(err, check.IsNil) + _, port, err := net.SplitHostPort(lis.Addr().String()) + c.Assert(err, check.IsNil) + + go func() { + err := startGRPCServer(lis, &myserver{ + stopStreamExample: stopStreamExample, + }) + c.Log(err) + c.Assert(err, check.IsNil) + }() + + file := s.adaptFile(c, "fixtures/grpc/config_with_flush.toml", struct { + CertContent string + KeyContent string + GRPCServerPort string + }{ + CertContent: string(LocalhostCert), + KeyContent: string(LocalhostKey), + GRPCServerPort: port, + }) + + defer os.Remove(file) + cmd, display := s.traefikCmd(withConfigFile(file)) + defer display(c) + + err = cmd.Start() + c.Assert(err, check.IsNil) + defer cmd.Process.Kill() + + // wait for Traefik + err = try.GetRequest("http://127.0.0.1:8080/api/providers", 1*time.Second, try.BodyContains("Host:127.0.0.1")) + c.Assert(err, check.IsNil) + var client helloworld.Greeter_StreamExampleClient + client, closer, err := callStreamExampleClientGRPC() + defer closer() + c.Assert(err, check.IsNil) + + received := make(chan bool) + go func() { + tr, err := client.Recv() + c.Assert(err, check.IsNil) + c.Assert(len(tr.Data), check.Equals, 512) + received <- true + }() + + err = try.Do(time.Millisecond*100, func() error { + select { + case <-received: + return nil + default: + return errors.New("failed to receive stream data") + } + }) + c.Assert(err, check.IsNil) +} diff --git a/provider/consulcatalog/config.go b/provider/consulcatalog/config.go index 22cc99651..460b4246f 100644 --- a/provider/consulcatalog/config.go +++ b/provider/consulcatalog/config.go @@ -34,6 +34,7 @@ func (p *Provider) buildConfigurationV2(catalog []catalogUpdate) *types.Configur "getMaxConn": label.GetMaxConn, "getHealthCheck": label.GetHealthCheck, "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, "getServer": p.getServer, // Frontend functions diff --git a/provider/consulcatalog/config_test.go 
b/provider/consulcatalog/config_test.go index 80d1c5fa3..e3017b982 100644 --- a/provider/consulcatalog/config_test.go +++ b/provider/consulcatalog/config_test.go @@ -405,6 +405,7 @@ func TestProviderBuildConfiguration(t *testing.T) { label.TraefikBackend + "=foobar", label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5", + label.TraefikBackendResponseForwardingFlushInterval + "=10ms", label.TraefikBackendHealthCheckPath + "=/health", label.TraefikBackendHealthCheckScheme + "=http", label.TraefikBackendHealthCheckPort + "=880", @@ -673,6 +674,9 @@ func TestProviderBuildConfiguration(t *testing.T) { CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/provider/docker/config.go b/provider/docker/config.go index 576a848c1..a77d19181 100644 --- a/provider/docker/config.go +++ b/provider/docker/config.go @@ -33,13 +33,14 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types "getDomain": label.GetFuncString(label.TraefikDomain, p.Domain), // Backend functions - "getIPAddress": p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead? - "getServers": p.getServers, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, + "getIPAddress": p.getDeprecatedIPAddress, // TODO: Should we expose getIPPort instead? 
+ "getServers": p.getServers, + "getMaxConn": label.GetMaxConn, + "getHealthCheck": label.GetHealthCheck, + "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, + "getCircuitBreaker": label.GetCircuitBreaker, + "getLoadBalancer": label.GetLoadBalancer, // Frontend functions "getBackendName": getBackendName, diff --git a/provider/docker/config_container_docker_test.go b/provider/docker/config_container_docker_test.go index ea01e7b71..4fd8596ed 100644 --- a/provider/docker/config_container_docker_test.go +++ b/provider/docker/config_container_docker_test.go @@ -434,6 +434,7 @@ func TestDockerBuildConfiguration(t *testing.T) { label.TraefikBackend: "foobar", label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5", + label.TraefikBackendResponseForwardingFlushInterval: "10ms", label.TraefikBackendHealthCheckScheme: "http", label.TraefikBackendHealthCheckPath: "/health", label.TraefikBackendHealthCheckPort: "880", @@ -666,6 +667,9 @@ func TestDockerBuildConfiguration(t *testing.T) { CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/provider/docker/config_container_swarm_test.go b/provider/docker/config_container_swarm_test.go index 29e862757..e0e868a2e 100644 --- a/provider/docker/config_container_swarm_test.go +++ b/provider/docker/config_container_swarm_test.go @@ -383,6 +383,7 @@ func TestSwarmBuildConfiguration(t *testing.T) { label.TraefikBackend: "foobar", label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5", + label.TraefikBackendResponseForwardingFlushInterval: "10ms", label.TraefikBackendHealthCheckScheme: "http", label.TraefikBackendHealthCheckPath: "/health", label.TraefikBackendHealthCheckPort: "880", @@ -584,6 +585,9 @@ func TestSwarmBuildConfiguration(t *testing.T) { CircuitBreaker: 
&types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/provider/ecs/config.go b/provider/ecs/config.go index ccc25e8bf..0183f297b 100644 --- a/provider/ecs/config.go +++ b/provider/ecs/config.go @@ -21,14 +21,16 @@ import ( func (p *Provider) buildConfigurationV2(instances []ecsInstance) (*types.Configuration, error) { var ecsFuncMap = template.FuncMap{ // Backend functions - "getHost": getHost, - "getPort": getPort, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getServers": getServers, + "getHost": getHost, + "getPort": getPort, + "getCircuitBreaker": label.GetCircuitBreaker, + "getLoadBalancer": label.GetLoadBalancer, + "getMaxConn": label.GetMaxConn, + "getHealthCheck": label.GetHealthCheck, + "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, + + "getServers": getServers, // Frontend functions "filterFrontends": filterFrontends, diff --git a/provider/ecs/config_test.go b/provider/ecs/config_test.go index 0a4a169c8..bdb9a3749 100644 --- a/provider/ecs/config_test.go +++ b/provider/ecs/config_test.go @@ -342,6 +342,7 @@ func TestBuildConfiguration(t *testing.T) { label.TraefikBackend: aws.String("foobar"), label.TraefikBackendCircuitBreakerExpression: aws.String("NetworkErrorRatio() > 0.5"), + label.TraefikBackendResponseForwardingFlushInterval: aws.String("10ms"), label.TraefikBackendHealthCheckScheme: aws.String("http"), label.TraefikBackendHealthCheckPath: aws.String("/health"), label.TraefikBackendHealthCheckPort: aws.String("880"), @@ -458,6 +459,9 @@ func TestBuildConfiguration(t *testing.T) { CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + 
ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/provider/kubernetes/annotations.go b/provider/kubernetes/annotations.go index c1b4abde3..042b5708e 100644 --- a/provider/kubernetes/annotations.go +++ b/provider/kubernetes/annotations.go @@ -7,42 +7,43 @@ import ( ) const ( - annotationKubernetesIngressClass = "kubernetes.io/ingress.class" - annotationKubernetesAuthRealm = "ingress.kubernetes.io/auth-realm" - annotationKubernetesAuthType = "ingress.kubernetes.io/auth-type" - annotationKubernetesAuthSecret = "ingress.kubernetes.io/auth-secret" - annotationKubernetesAuthHeaderField = "ingress.kubernetes.io/auth-header-field" - annotationKubernetesAuthForwardResponseHeaders = "ingress.kubernetes.io/auth-response-headers" - annotationKubernetesAuthRemoveHeader = "ingress.kubernetes.io/auth-remove-header" - annotationKubernetesAuthForwardURL = "ingress.kubernetes.io/auth-url" - annotationKubernetesAuthForwardTrustHeaders = "ingress.kubernetes.io/auth-trust-headers" - annotationKubernetesAuthForwardTLSSecret = "ingress.kubernetes.io/auth-tls-secret" - annotationKubernetesAuthForwardTLSInsecure = "ingress.kubernetes.io/auth-tls-insecure" - annotationKubernetesRewriteTarget = "ingress.kubernetes.io/rewrite-target" - annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range" - annotationKubernetesWhiteListUseXForwardedFor = "ingress.kubernetes.io/whitelist-x-forwarded-for" - annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host" - annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" // Deprecated - annotationKubernetesPassTLSClientCert = "ingress.kubernetes.io/pass-client-tls-cert" - annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points" - annotationKubernetesPriority = "ingress.kubernetes.io/priority" - annotationKubernetesCircuitBreakerExpression = 
"ingress.kubernetes.io/circuit-breaker-expression" - annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method" - annotationKubernetesAffinity = "ingress.kubernetes.io/affinity" - annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name" - annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type" - annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point" - annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent" - annotationKubernetesRedirectRegex = "ingress.kubernetes.io/redirect-regex" - annotationKubernetesRedirectReplacement = "ingress.kubernetes.io/redirect-replacement" - annotationKubernetesMaxConnAmount = "ingress.kubernetes.io/max-conn-amount" - annotationKubernetesMaxConnExtractorFunc = "ingress.kubernetes.io/max-conn-extractor-func" - annotationKubernetesRateLimit = "ingress.kubernetes.io/rate-limit" - annotationKubernetesErrorPages = "ingress.kubernetes.io/error-pages" - annotationKubernetesBuffering = "ingress.kubernetes.io/buffering" - annotationKubernetesAppRoot = "ingress.kubernetes.io/app-root" - annotationKubernetesServiceWeights = "ingress.kubernetes.io/service-weights" - annotationKubernetesRequestModifier = "ingress.kubernetes.io/request-modifier" + annotationKubernetesIngressClass = "kubernetes.io/ingress.class" + annotationKubernetesAuthRealm = "ingress.kubernetes.io/auth-realm" + annotationKubernetesAuthType = "ingress.kubernetes.io/auth-type" + annotationKubernetesAuthSecret = "ingress.kubernetes.io/auth-secret" + annotationKubernetesAuthHeaderField = "ingress.kubernetes.io/auth-header-field" + annotationKubernetesAuthForwardResponseHeaders = "ingress.kubernetes.io/auth-response-headers" + annotationKubernetesAuthRemoveHeader = "ingress.kubernetes.io/auth-remove-header" + annotationKubernetesAuthForwardURL = "ingress.kubernetes.io/auth-url" + annotationKubernetesAuthForwardTrustHeaders = 
"ingress.kubernetes.io/auth-trust-headers" + annotationKubernetesAuthForwardTLSSecret = "ingress.kubernetes.io/auth-tls-secret" + annotationKubernetesAuthForwardTLSInsecure = "ingress.kubernetes.io/auth-tls-insecure" + annotationKubernetesRewriteTarget = "ingress.kubernetes.io/rewrite-target" + annotationKubernetesWhiteListSourceRange = "ingress.kubernetes.io/whitelist-source-range" + annotationKubernetesWhiteListUseXForwardedFor = "ingress.kubernetes.io/whitelist-x-forwarded-for" + annotationKubernetesPreserveHost = "ingress.kubernetes.io/preserve-host" + annotationKubernetesPassTLSCert = "ingress.kubernetes.io/pass-tls-cert" // Deprecated + annotationKubernetesPassTLSClientCert = "ingress.kubernetes.io/pass-client-tls-cert" + annotationKubernetesFrontendEntryPoints = "ingress.kubernetes.io/frontend-entry-points" + annotationKubernetesPriority = "ingress.kubernetes.io/priority" + annotationKubernetesCircuitBreakerExpression = "ingress.kubernetes.io/circuit-breaker-expression" + annotationKubernetesLoadBalancerMethod = "ingress.kubernetes.io/load-balancer-method" + annotationKubernetesAffinity = "ingress.kubernetes.io/affinity" + annotationKubernetesSessionCookieName = "ingress.kubernetes.io/session-cookie-name" + annotationKubernetesRuleType = "ingress.kubernetes.io/rule-type" + annotationKubernetesRedirectEntryPoint = "ingress.kubernetes.io/redirect-entry-point" + annotationKubernetesRedirectPermanent = "ingress.kubernetes.io/redirect-permanent" + annotationKubernetesRedirectRegex = "ingress.kubernetes.io/redirect-regex" + annotationKubernetesRedirectReplacement = "ingress.kubernetes.io/redirect-replacement" + annotationKubernetesMaxConnAmount = "ingress.kubernetes.io/max-conn-amount" + annotationKubernetesMaxConnExtractorFunc = "ingress.kubernetes.io/max-conn-extractor-func" + annotationKubernetesRateLimit = "ingress.kubernetes.io/rate-limit" + annotationKubernetesErrorPages = "ingress.kubernetes.io/error-pages" + annotationKubernetesBuffering = 
"ingress.kubernetes.io/buffering" + annotationKubernetesResponseForwardingFlushInterval = "ingress.kubernetes.io/responseforwarding-flushinterval" + annotationKubernetesAppRoot = "ingress.kubernetes.io/app-root" + annotationKubernetesServiceWeights = "ingress.kubernetes.io/service-weights" + annotationKubernetesRequestModifier = "ingress.kubernetes.io/request-modifier" annotationKubernetesSSLForceHost = "ingress.kubernetes.io/ssl-force-host" annotationKubernetesSSLRedirect = "ingress.kubernetes.io/ssl-redirect" diff --git a/provider/kubernetes/builder_configuration_test.go b/provider/kubernetes/builder_configuration_test.go index 7ec4a9f94..c350bf663 100644 --- a/provider/kubernetes/builder_configuration_test.go +++ b/provider/kubernetes/builder_configuration_test.go @@ -93,6 +93,13 @@ func circuitBreaker(exp string) func(*types.Backend) { } } +func responseForwarding(interval string) func(*types.Backend) { + return func(b *types.Backend) { + b.ResponseForwarding = &types.ResponseForwarding{} + b.ResponseForwarding.FlushInterval = interval + } +} + func buffering(opts ...func(*types.Buffering)) func(*types.Backend) { return func(b *types.Backend) { if b.Buffering == nil { diff --git a/provider/kubernetes/kubernetes.go b/provider/kubernetes/kubernetes.go index 0aa07d85e..9d761c60c 100644 --- a/provider/kubernetes/kubernetes.go +++ b/provider/kubernetes/kubernetes.go @@ -337,6 +337,7 @@ func (p *Provider) loadIngresses(k8sClient Client) (*types.Configuration, error) templateObjects.Backends[baseName].LoadBalancer = getLoadBalancer(service) templateObjects.Backends[baseName].MaxConn = getMaxConn(service) templateObjects.Backends[baseName].Buffering = getBuffering(service) + templateObjects.Backends[baseName].ResponseForwarding = getResponseForwarding(service) protocol := label.DefaultProtocol @@ -494,6 +495,7 @@ func (p *Provider) addGlobalBackend(cl Client, i *extensionsv1beta1.Ingress, tem templateObjects.Backends[defaultBackendName].LoadBalancer = 
getLoadBalancer(service) templateObjects.Backends[defaultBackendName].MaxConn = getMaxConn(service) templateObjects.Backends[defaultBackendName].Buffering = getBuffering(service) + templateObjects.Backends[defaultBackendName].ResponseForwarding = getResponseForwarding(service) endpoints, exists, err := cl.GetEndpoints(service.Namespace, service.Name) if err != nil { @@ -951,6 +953,17 @@ func getWhiteList(i *extensionsv1beta1.Ingress) *types.WhiteList { } } +func getResponseForwarding(service *corev1.Service) *types.ResponseForwarding { + flushIntervalValue := getStringValue(service.Annotations, annotationKubernetesResponseForwardingFlushInterval, "") + if len(flushIntervalValue) == 0 { + return nil + } + + return &types.ResponseForwarding{ + FlushInterval: flushIntervalValue, + } +} + func getBuffering(service *corev1.Service) *types.Buffering { var buffering *types.Buffering diff --git a/provider/kubernetes/kubernetes_test.go b/provider/kubernetes/kubernetes_test.go index d1aba1ad7..e45b0d77b 100644 --- a/provider/kubernetes/kubernetes_test.go +++ b/provider/kubernetes/kubernetes_test.go @@ -908,6 +908,9 @@ func TestServiceAnnotations(t *testing.T) { iRule( iHost("max-conn"), iPaths(onePath(iBackend("service4", intstr.FromInt(804))))), + iRule( + iHost("flush"), + iPaths(onePath(iBackend("service5", intstr.FromInt(805))))), ), ), } @@ -958,6 +961,15 @@ retryexpression: IsNetworkError() && Attempts() <= 2 clusterIP("10.0.0.4"), sPorts(sPort(804, "http"))), ), + buildService( + sName("service5"), + sNamespace("testing"), + sUID("5"), + sAnnotation(annotationKubernetesResponseForwardingFlushInterval, "10ms"), + sSpec( + clusterIP("10.0.0.5"), + sPorts(sPort(80, ""))), + ), } endpoints := []*corev1.Endpoints{ @@ -1005,6 +1017,17 @@ retryexpression: IsNetworkError() && Attempts() <= 2 eAddresses(eAddress("10.4.0.2")), ePorts(ePort(8080, "http"))), ), + buildEndpoint( + eNamespace("testing"), + eName("service5"), + eUID("5"), + subset( + 
eAddresses(eAddress("10.4.0.1")), + ePorts(ePort(8080, "http"))), + subset( + eAddresses(eAddress("10.4.0.2")), + ePorts(ePort(8080, "http"))), + ), } watchChan := make(chan interface{}) @@ -1028,6 +1051,11 @@ retryexpression: IsNetworkError() && Attempts() <= 2 lbMethod("drr"), circuitBreaker("NetworkErrorRatio() > 0.5"), ), + backend("flush", + servers(), + lbMethod("wrr"), + responseForwarding("10ms"), + ), backend("bar", servers( server("http://10.15.0.1:8080", weight(1)), @@ -1073,6 +1101,10 @@ retryexpression: IsNetworkError() && Attempts() <= 2 passHostHeader(), routes( route("max-conn", "Host:max-conn"))), + frontend("flush", + passHostHeader(), + routes( + route("flush", "Host:flush"))), ), ) diff --git a/provider/kv/keynames.go b/provider/kv/keynames.go index 38876c797..b9c2546a4 100644 --- a/provider/kv/keynames.go +++ b/provider/kv/keynames.go @@ -3,6 +3,7 @@ package kv const ( pathBackends = "/backends/" pathBackendCircuitBreakerExpression = "/circuitbreaker/expression" + pathBackendResponseForwardingFlushInterval = "/responseforwarding/flushinterval" pathBackendHealthCheckScheme = "/healthcheck/scheme" pathBackendHealthCheckPath = "/healthcheck/path" pathBackendHealthCheckPort = "/healthcheck/port" diff --git a/provider/kv/kv_config.go b/provider/kv/kv_config.go index 4883ce108..e06a2b7ba 100644 --- a/provider/kv/kv_config.go +++ b/provider/kv/kv_config.go @@ -59,6 +59,7 @@ func (p *Provider) buildConfiguration() *types.Configuration { // Backend functions "getServers": p.getServers, "getCircuitBreaker": p.getCircuitBreaker, + "getResponseForwarding": p.getResponseForwarding, "getLoadBalancer": p.getLoadBalancer, "getMaxConn": p.getMaxConn, "getHealthCheck": p.getHealthCheck, @@ -269,6 +270,20 @@ func (p *Provider) getLoadBalancer(rootPath string) *types.LoadBalancer { return lb } +func (p *Provider) getResponseForwarding(rootPath string) *types.ResponseForwarding { + if !p.has(rootPath, pathBackendResponseForwardingFlushInterval) { + return nil + } + 
value := p.get("", rootPath, pathBackendResponseForwardingFlushInterval) + if len(value) == 0 { + return nil + } + + return &types.ResponseForwarding{ + FlushInterval: value, + } +} + func (p *Provider) getCircuitBreaker(rootPath string) *types.CircuitBreaker { if !p.has(rootPath, pathBackendCircuitBreakerExpression) { return nil diff --git a/provider/label/names.go b/provider/label/names.go index 442a2f890..4fb07e688 100644 --- a/provider/label/names.go +++ b/provider/label/names.go @@ -29,6 +29,7 @@ const ( SuffixBackendMaxConnAmount = "backend.maxconn.amount" SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc" SuffixBackendBuffering = "backend.buffering" + SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval" SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes" SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes" SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes" @@ -131,6 +132,7 @@ const ( TraefikBackendMaxConnAmount = Prefix + SuffixBackendMaxConnAmount TraefikBackendMaxConnExtractorFunc = Prefix + SuffixBackendMaxConnExtractorFunc TraefikBackendBuffering = Prefix + SuffixBackendBuffering + TraefikBackendResponseForwardingFlushInterval = Prefix + SuffixBackendResponseForwardingFlushInterval TraefikBackendBufferingMaxRequestBodyBytes = Prefix + SuffixBackendBufferingMaxRequestBodyBytes TraefikBackendBufferingMemRequestBodyBytes = Prefix + SuffixBackendBufferingMemRequestBodyBytes TraefikBackendBufferingMaxResponseBodyBytes = Prefix + SuffixBackendBufferingMaxResponseBodyBytes diff --git a/provider/label/partial.go b/provider/label/partial.go index a2026743d..d4b7652ad 100644 --- a/provider/label/partial.go +++ b/provider/label/partial.go @@ -354,6 +354,19 @@ func GetHealthCheck(labels map[string]string) *types.HealthCheck { } } +// GetResponseForwarding Create ResponseForwarding from labels 
+func GetResponseForwarding(labels map[string]string) *types.ResponseForwarding { + if !HasPrefix(labels, TraefikBackendResponseForwardingFlushInterval) { + return nil + } + + value := GetStringValue(labels, TraefikBackendResponseForwardingFlushInterval, "0") + + return &types.ResponseForwarding{ + FlushInterval: value, + } +} + // GetBuffering Create buffering from labels func GetBuffering(labels map[string]string) *types.Buffering { if !HasPrefix(labels, TraefikBackendBuffering) { diff --git a/provider/marathon/config.go b/provider/marathon/config.go index 5e892cb4a..a99b44693 100644 --- a/provider/marathon/config.go +++ b/provider/marathon/config.go @@ -32,13 +32,14 @@ func (p *Provider) buildConfigurationV2(applications *marathon.Applications) *ty "getBackendName": p.getBackendName, // Backend functions - "getPort": getPort, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getServers": p.getServers, + "getPort": getPort, + "getCircuitBreaker": label.GetCircuitBreaker, + "getLoadBalancer": label.GetLoadBalancer, + "getMaxConn": label.GetMaxConn, + "getHealthCheck": label.GetHealthCheck, + "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, + "getServers": p.getServers, // Frontend functions "getSegmentNameSuffix": getSegmentNameSuffix, diff --git a/provider/marathon/config_test.go b/provider/marathon/config_test.go index 172b88963..c5f09abeb 100644 --- a/provider/marathon/config_test.go +++ b/provider/marathon/config_test.go @@ -357,6 +357,7 @@ func TestBuildConfiguration(t *testing.T) { withLabel(label.TraefikBackend, "foobar"), withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"), + withLabel(label.TraefikBackendResponseForwardingFlushInterval, "10ms"), withLabel(label.TraefikBackendHealthCheckScheme, "http"), 
withLabel(label.TraefikBackendHealthCheckPath, "/health"), withLabel(label.TraefikBackendHealthCheckPort, "880"), @@ -586,6 +587,9 @@ func TestBuildConfiguration(t *testing.T) { CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/provider/mesos/config.go b/provider/mesos/config.go index 61f415e88..efb7b606e 100644 --- a/provider/mesos/config.go +++ b/provider/mesos/config.go @@ -29,15 +29,16 @@ func (p *Provider) buildConfigurationV2(tasks []state.Task) *types.Configuration "getID": getID, // Backend functions - "getBackendName": getBackendName, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getServers": p.getServers, - "getHost": p.getHost, - "getServerPort": p.getServerPort, + "getBackendName": getBackendName, + "getCircuitBreaker": label.GetCircuitBreaker, + "getLoadBalancer": label.GetLoadBalancer, + "getMaxConn": label.GetMaxConn, + "getHealthCheck": label.GetHealthCheck, + "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, + "getServers": p.getServers, + "getHost": p.getHost, + "getServerPort": p.getServerPort, // Frontend functions "getSegmentNameSuffix": getSegmentNameSuffix, diff --git a/provider/mesos/config_test.go b/provider/mesos/config_test.go index 9961fd118..bdef2344d 100644 --- a/provider/mesos/config_test.go +++ b/provider/mesos/config_test.go @@ -314,6 +314,7 @@ func TestBuildConfiguration(t *testing.T) { withLabel(label.TraefikBackend, "foobar"), withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"), + withLabel(label.TraefikBackendResponseForwardingFlushInterval, "10ms"), withLabel(label.TraefikBackendHealthCheckScheme, "http"), 
withLabel(label.TraefikBackendHealthCheckPath, "/health"), withLabel(label.TraefikBackendHealthCheckPort, "880"), @@ -546,6 +547,9 @@ func TestBuildConfiguration(t *testing.T) { CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Stickiness: &types.Stickiness{ diff --git a/provider/rancher/config.go b/provider/rancher/config.go index d6232b47a..aa11c3cf7 100644 --- a/provider/rancher/config.go +++ b/provider/rancher/config.go @@ -20,12 +20,13 @@ func (p *Provider) buildConfigurationV2(services []rancherData) *types.Configura "getDomain": label.GetFuncString(label.TraefikDomain, p.Domain), // Backend functions - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getServers": getServers, + "getCircuitBreaker": label.GetCircuitBreaker, + "getLoadBalancer": label.GetLoadBalancer, + "getMaxConn": label.GetMaxConn, + "getHealthCheck": label.GetHealthCheck, + "getBuffering": label.GetBuffering, + "getResponseForwarding": label.GetResponseForwarding, + "getServers": getServers, // Frontend functions "getBackendName": getBackendName, diff --git a/provider/rancher/config_test.go b/provider/rancher/config_test.go index 1bcd2f2c6..b0fc04583 100644 --- a/provider/rancher/config_test.go +++ b/provider/rancher/config_test.go @@ -41,6 +41,7 @@ func TestProviderBuildConfiguration(t *testing.T) { label.TraefikBackend: "foobar", label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5", + label.TraefikBackendResponseForwardingFlushInterval: "10ms", label.TraefikBackendHealthCheckScheme: "http", label.TraefikBackendHealthCheckPath: "/health", label.TraefikBackendHealthCheckPort: "880", @@ -277,6 +278,9 @@ func TestProviderBuildConfiguration(t *testing.T) { 
CircuitBreaker: &types.CircuitBreaker{ Expression: "NetworkErrorRatio() > 0.5", }, + ResponseForwarding: &types.ResponseForwarding{ + FlushInterval: "10ms", + }, LoadBalancer: &types.LoadBalancer{ Method: "drr", Sticky: true, diff --git a/server/server_configuration.go b/server/server_configuration.go index 73cc07eab..3dbc7f08e 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/containous/flaeg/parse" "github.com/containous/mux" "github.com/containous/traefik/configuration" "github.com/containous/traefik/healthcheck" @@ -163,7 +164,7 @@ func (s *Server) loadFrontendConfig( postConfigs = append(postConfigs, postConfig) } - fwd, err := s.buildForwarder(entryPointName, entryPoint, frontendName, frontend, responseModifier) + fwd, err := s.buildForwarder(entryPointName, entryPoint, frontendName, frontend, responseModifier, backend) if err != nil { return nil, fmt.Errorf("failed to create the forwarder for frontend %s: %v", frontendName, err) } @@ -216,7 +217,7 @@ func (s *Server) loadFrontendConfig( func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration.EntryPoint, frontendName string, frontend *types.Frontend, - responseModifier modifyResponse) (http.Handler, error) { + responseModifier modifyResponse, backend *types.Backend) (http.Handler, error) { roundTripper, err := s.getRoundTripper(entryPointName, frontend.PassTLSCert, entryPoint.TLS) if err != nil { @@ -228,6 +229,14 @@ func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration return nil, fmt.Errorf("error creating rewriter for frontend %s: %v", frontendName, err) } + var flushInterval parse.Duration + if backend.ResponseForwarding != nil { + err := flushInterval.Set(backend.ResponseForwarding.FlushInterval) + if err != nil { + return nil, fmt.Errorf("error creating flush interval for frontend %s: %v", frontendName, err) + } + } + var fwd http.Handler fwd, err = forward.New( 
forward.Stream(true), @@ -236,6 +245,7 @@ func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration forward.Rewriter(rewriter), forward.ResponseModifier(responseModifier), forward.BufferPool(s.bufferPool), + forward.StreamingFlushInterval(time.Duration(flushInterval)), forward.WebsocketConnectionClosedHook(func(req *http.Request, conn net.Conn) { server := req.Context().Value(http.ServerContextKey).(*http.Server) if server != nil { diff --git a/templates/consul_catalog.tmpl b/templates/consul_catalog.tmpl index 2004bc6a0..d04fbe46b 100644 --- a/templates/consul_catalog.tmpl +++ b/templates/consul_catalog.tmpl @@ -8,6 +8,14 @@ expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $service.TraefikLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + + + {{ $loadBalancer := getLoadBalancer $service.TraefikLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] diff --git a/templates/docker.tmpl b/templates/docker.tmpl index a26345745..bea2990b0 100644 --- a/templates/docker.tmpl +++ b/templates/docker.tmpl @@ -9,6 +9,12 @@ expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $backend.SegmentLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $backend.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] diff --git a/templates/ecs.tmpl b/templates/ecs.tmpl index b80e3afef..6dd57231b 100644 --- a/templates/ecs.tmpl +++ b/templates/ecs.tmpl @@ -8,6 +8,12 @@ expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $firstInstance.SegmentLabels }} + {{if 
$responseForwarding }} + [backends."backend-{{ $serviceName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $firstInstance.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $serviceName }}".loadBalancer] diff --git a/templates/kubernetes.tmpl b/templates/kubernetes.tmpl index c522ffae5..fef76bc12 100644 --- a/templates/kubernetes.tmpl +++ b/templates/kubernetes.tmpl @@ -8,6 +8,11 @@ expression = "{{ $backend.CircuitBreaker.Expression }}" {{end}} + {{if $backend.ResponseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $backend.ResponseForwarding.FlushInterval }}" + {{end}} + [backends."{{ $backendName }}".loadBalancer] method = "{{ $backend.LoadBalancer.Method }}" sticky = {{ $backend.LoadBalancer.Sticky }} diff --git a/templates/kv.tmpl b/templates/kv.tmpl index 73842e1d2..15062243d 100644 --- a/templates/kv.tmpl +++ b/templates/kv.tmpl @@ -7,6 +7,12 @@ [backends."{{ $backendName }}".circuitBreaker] expression = "{{ $circuitBreaker.Expression }}" {{end}} + + {{ $responseForwarding := getResponseForwarding $backend }} + {{if $responseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} {{ $loadBalancer := getLoadBalancer $backend }} {{if $loadBalancer }} diff --git a/templates/marathon.tmpl b/templates/marathon.tmpl index 15c5ce285..7a5038e44 100644 --- a/templates/marathon.tmpl +++ b/templates/marathon.tmpl @@ -10,6 +10,12 @@ [backends."{{ $backendName }}".circuitBreaker] expression = "{{ $circuitBreaker.Expression }}" {{end}} + + {{ $responseForwarding := getResponseForwarding $app.SegmentLabels }} + {{if $responseForwarding }} + [backends."{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} {{ $loadBalancer := getLoadBalancer $app.SegmentLabels }} {{if $loadBalancer }} diff --git 
a/templates/mesos.tmpl b/templates/mesos.tmpl index 51f30308c..a77ba862d 100644 --- a/templates/mesos.tmpl +++ b/templates/mesos.tmpl @@ -11,6 +11,12 @@ expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $app.TraefikLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $app.TraefikLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] diff --git a/templates/rancher.tmpl b/templates/rancher.tmpl index eb4976870..197761c1d 100644 --- a/templates/rancher.tmpl +++ b/templates/rancher.tmpl @@ -10,6 +10,12 @@ expression = "{{ $circuitBreaker.Expression }}" {{end}} + {{ $responseForwarding := getResponseForwarding $backend.SegmentLabels }} + {{if $responseForwarding }} + [backends."backend-{{ $backendName }}".responseForwarding] + flushInterval = "{{ $responseForwarding.FlushInterval }}" + {{end}} + {{ $loadBalancer := getLoadBalancer $backend.SegmentLabels }} {{if $loadBalancer }} [backends."backend-{{ $backendName }}".loadBalancer] diff --git a/types/types.go b/types/types.go index 99393afb5..52844053f 100644 --- a/types/types.go +++ b/types/types.go @@ -22,12 +22,18 @@ import ( // Backend holds backend configuration. 
type Backend struct { - Servers map[string]Server `json:"servers,omitempty"` - CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"` - LoadBalancer *LoadBalancer `json:"loadBalancer,omitempty"` - MaxConn *MaxConn `json:"maxConn,omitempty"` - HealthCheck *HealthCheck `json:"healthCheck,omitempty"` - Buffering *Buffering `json:"buffering,omitempty"` + Servers map[string]Server `json:"servers,omitempty"` + CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"` + LoadBalancer *LoadBalancer `json:"loadBalancer,omitempty"` + MaxConn *MaxConn `json:"maxConn,omitempty"` + HealthCheck *HealthCheck `json:"healthCheck,omitempty"` + Buffering *Buffering `json:"buffering,omitempty"` + ResponseForwarding *ResponseForwarding `json:"forwardingResponse,omitempty"` +} + +// ResponseForwarding holds configuration for the forward of the response +type ResponseForwarding struct { + FlushInterval string `json:"flushInterval,omitempty"` } // MaxConn holds maximum connection configuration From 19546ab5188b27b7b5c2f2a873c08a7090272979 Mon Sep 17 00:00:00 2001 From: Aaron <44198148+whalehub@users.noreply.github.com> Date: Tue, 30 Oct 2018 10:49:41 +0100 Subject: [PATCH 28/29] Fix mistake in the documentation of several backends (#4133) --- docs/configuration/backends/consulcatalog.md | 2 +- docs/configuration/backends/docker.md | 2 +- docs/configuration/backends/ecs.md | 2 +- docs/configuration/backends/kubernetes.md | 2 +- docs/configuration/backends/marathon.md | 2 +- docs/configuration/backends/mesos.md | 2 +- docs/configuration/backends/rancher.md | 2 +- docs/configuration/backends/servicefabric.md | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/configuration/backends/consulcatalog.md b/docs/configuration/backends/consulcatalog.md index 7dc9248cb..7a1dbf577 100644 --- a/docs/configuration/backends/consulcatalog.md +++ b/docs/configuration/backends/consulcatalog.md @@ -201,7 +201,7 @@ If you need to support multiple frontends for a service, 
for example when having | `.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `.frontend.headers.hostsProxyHeaders=EXPR` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | | `.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | -| `.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/docker.md b/docs/configuration/backends/docker.md index e8d133ac2..05113eef1 100644 --- a/docs/configuration/backends/docker.md +++ b/docs/configuration/backends/docker.md @@ -316,7 +316,7 @@ The result will be `user:$$apr1$$9Cv/OMGj$$ZomWQzuQbL.3TRCS81A1g/`, note additio | `traefik.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `traefik.frontend.headers.hostsProxyHeaders=EXPR ` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/ecs.md b/docs/configuration/backends/ecs.md index b4d84319c..02f7f74df 100644 --- a/docs/configuration/backends/ecs.md +++ b/docs/configuration/backends/ecs.md @@ -229,7 +229,7 @@ Labels can be used on task containers to override default behaviour: | `traefik.frontend.headers.forceSTSHeader=false` | Adds the STS header to non-SSL requests. | | `traefik.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `traefik.frontend.headers.hostsProxyHeaders=EXPR ` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | | `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | diff --git a/docs/configuration/backends/kubernetes.md b/docs/configuration/backends/kubernetes.md index c38af2bbf..0036d60f6 100644 --- a/docs/configuration/backends/kubernetes.md +++ b/docs/configuration/backends/kubernetes.md @@ -322,7 +322,7 @@ The following security annotations are applicable on the Ingress object: | `ingress.kubernetes.io/hsts-preload: "true"` | Adds the preload flag to the HSTS header. | | `ingress.kubernetes.io/is-development: "false"` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | | `ingress.kubernetes.io/proxy-headers: EXPR` | Provides a list of headers that the proxied hostname may be stored. Format: `HEADER1,HEADER2` | -| `ingress.kubernetes.io/public-key: VALUE` | Adds pinned HTST public key header. | +| `ingress.kubernetes.io/public-key: VALUE` | Adds HPKP header. | | `ingress.kubernetes.io/referrer-policy: VALUE` | Adds referrer policy header. | | `ingress.kubernetes.io/ssl-redirect: "true"` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `ingress.kubernetes.io/ssl-temporary-redirect: "true"` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/marathon.md b/docs/configuration/backends/marathon.md index ef4b20b99..4ed62f892 100644 --- a/docs/configuration/backends/marathon.md +++ b/docs/configuration/backends/marathon.md @@ -289,7 +289,7 @@ The following labels can be defined on Marathon applications. They adjust the be | `traefik.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `traefik.frontend.headers.hostsProxyHeaders=EXPR ` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/mesos.md b/docs/configuration/backends/mesos.md index 13568bdc8..d65877d0a 100644 --- a/docs/configuration/backends/mesos.md +++ b/docs/configuration/backends/mesos.md @@ -201,7 +201,7 @@ The following labels can be defined on Mesos tasks. They adjust the behavior for | `traefik.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `traefik.frontend.headers.hostsProxyHeaders=EXPR ` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/rancher.md b/docs/configuration/backends/rancher.md index 9e96c2c2f..fe0e8b20d 100644 --- a/docs/configuration/backends/rancher.md +++ b/docs/configuration/backends/rancher.md @@ -231,7 +231,7 @@ Labels can be used on task containers to override default behavior: | `traefik.frontend.headers.frameDeny=false` | Adds the `X-Frame-Options` header with the value of `DENY`. | | `traefik.frontend.headers.hostsProxyHeaders=EXPR ` | Provides a list of headers that the proxied hostname may be stored.
Format: `HEADER1,HEADER2` | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.SSLRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent. | | `traefik.frontend.headers.SSLTemporaryRedirect=true` | Forces the frontend to redirect to SSL if a non-SSL request is sent, but by sending a 302 instead of a 301. | diff --git a/docs/configuration/backends/servicefabric.md b/docs/configuration/backends/servicefabric.md index 62944e6d3..65a4700ec 100644 --- a/docs/configuration/backends/servicefabric.md +++ b/docs/configuration/backends/servicefabric.md @@ -153,6 +153,6 @@ Labels, set through extensions or the property manager, can be used on services | `traefik.frontend.headers.browserXSSFilter=true` | Adds the X-XSS-Protection header with the value `1; mode=block`. | | `traefik.frontend.headers.customBrowserXSSValue=VALUE` | Set custom value for X-XSS-Protection header. This overrides the BrowserXssFilter option. | | `traefik.frontend.headers.contentSecurityPolicy=VALUE` | Adds CSP Header with the custom value. | -| `traefik.frontend.headers.publicKey=VALUE` | Adds pinned HTST public key header. | +| `traefik.frontend.headers.publicKey=VALUE` | Adds HPKP header. | | `traefik.frontend.headers.referrerPolicy=VALUE` | Adds referrer policy header. | | `traefik.frontend.headers.isDevelopment=false` | This will cause the `AllowedHosts`, `SSLRedirect`, and `STSSeconds`/`STSIncludeSubdomains` options to be ignored during development.
When deploying to production, be sure to set this to false. | From 1fad7e5a1c0c90a90b73d530950508cf9b939297 Mon Sep 17 00:00:00 2001 From: Ludovic Fernandez Date: Tue, 30 Oct 2018 11:32:04 +0100 Subject: [PATCH 29/29] Prepare release v1.7.4 --- CHANGELOG.md | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ab2a1245e..6cc854efb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Change Log +## [v1.7.4](https://github.com/containous/traefik/tree/v1.7.4) (2018-10-30) +[All Commits](https://github.com/containous/traefik/compare/v1.7.3...v1.7.4) + +**Bug fixes:** +- **[acme]** Support custom DNS resolvers for Let's Encrypt. ([#4101](https://github.com/containous/traefik/pull/4101) by [ldez](https://github.com/ldez)) +- **[acme]** fix: netcup and DuckDNS. ([#4094](https://github.com/containous/traefik/pull/4094) by [ldez](https://github.com/ldez)) +- **[authentication,logs,middleware]** Fix display of client username field ([#4093](https://github.com/containous/traefik/pull/4093) by [Ullaakut](https://github.com/Ullaakut)) +- **[authentication,middleware]** Nil request body with retry ([#4075](https://github.com/containous/traefik/pull/4075) by [ldez](https://github.com/ldez)) +- **[consul,consulcatalog,docker,ecs,k8s,marathon,mesos,rancher]** Add flush interval option on backend ([#4112](https://github.com/containous/traefik/pull/4112) by [juliens](https://github.com/juliens)) +- **[consulcatalog,docker,ecs,marathon,mesos,rancher]** Remove the trailing dot if the domain is not defined. 
([#4095](https://github.com/containous/traefik/pull/4095) by [ldez](https://github.com/ldez)) +- **[docker]** Provider docker shutdown problem ([#4122](https://github.com/containous/traefik/pull/4122) by [juliens](https://github.com/juliens)) +- **[k8s]** Add default path if nothing present ([#4097](https://github.com/containous/traefik/pull/4097) by [SantoDE](https://github.com/SantoDE)) +- **[k8s]** Add the missing pass-client-tls annotation to the kubernetes provider ([#4118](https://github.com/containous/traefik/pull/4118) by [jbdoumenjou](https://github.com/jbdoumenjou)) +- **[logs]** Fix access log field parsing ([#4113](https://github.com/containous/traefik/pull/4113) by [Ullaakut](https://github.com/Ullaakut)) +- **[middleware]** Add static redirect ([#4090](https://github.com/containous/traefik/pull/4090) by [SantoDE](https://github.com/SantoDE)) +- **[rules]** Add keepTrailingSlash option ([#4062](https://github.com/containous/traefik/pull/4062) by [juliens](https://github.com/juliens)) +- **[rules]** Case insensitive host rule ([#3931](https://github.com/containous/traefik/pull/3931) by [bgandon](https://github.com/bgandon)) +- **[tls]** Fix certificate insertion loop to keep valid certificate and ignore the bad one ([#4050](https://github.com/containous/traefik/pull/4050) by [jbdoumenjou](https://github.com/jbdoumenjou)) +- **[webui]** Typo in the UI. ([#4096](https://github.com/containous/traefik/pull/4096) by [ldez](https://github.com/ldez)) + +**Documentation:** +- **[acme]** Adds the note: acme.domains is a startup configuration ([#4065](https://github.com/containous/traefik/pull/4065) by [geraldcroes](https://github.com/geraldcroes)) +- **[acme]** exoscale move from .ch to .com ([#4130](https://github.com/containous/traefik/pull/4130) by [greut](https://github.com/greut)) +- **[acme]** Fixing a typo. 
([#4124](https://github.com/containous/traefik/pull/4124) by [konovalov-nk](https://github.com/konovalov-nk)) +- **[acme]** Add a note about TLS-ALPN challenge. ([#4106](https://github.com/containous/traefik/pull/4106) by [ldez](https://github.com/ldez)) +- **[acme]** Clarify DuckDNS does not support multiple TXT records ([#4061](https://github.com/containous/traefik/pull/4061) by [KnicKnic](https://github.com/KnicKnic)) +- **[docker]** Domain is also optional for "normal" mode ([#4086](https://github.com/containous/traefik/pull/4086) by [herver](https://github.com/herver)) +- **[provider]** Fix mistake in the documentation of several backends ([#4133](https://github.com/containous/traefik/pull/4133) by [whalehub](https://github.com/whalehub)) +- Replaces emilevauge/whoami by containous/whoami in the documentation ([#4111](https://github.com/containous/traefik/pull/4111) by [geraldcroes](https://github.com/geraldcroes)) +- Uses ASCII characters to spell Traefik ([#4063](https://github.com/containous/traefik/pull/4063) by [geraldcroes](https://github.com/geraldcroes)) + +**Misc:** +- **[tls]** Add double wildcard test ([#4091](https://github.com/containous/traefik/pull/4091) by [dtomcej](https://github.com/dtomcej)) +- **[webui]** Removed unused imports ([#4123](https://github.com/containous/traefik/pull/4123) by [mwvdev](https://github.com/mwvdev)) + ## [v1.7.3](https://github.com/containous/traefik/tree/v1.7.3) (2018-10-15) [All Commits](https://github.com/containous/traefik/compare/v1.7.2...v1.7.3)