From 72ffa91fe0770e40ba538f2e7e2a62415c7f7f5f Mon Sep 17 00:00:00 2001
From: Ludovic Fernandez
Date: Mon, 18 Mar 2019 11:30:07 +0100
Subject: [PATCH] Clean old

---
 cmd/configuration.go | 102 +-
 cmd/convert/convert.go | 151 --
 cmd/storeconfig/storeconfig.go | 57 +-
 cmd/traefik/traefik.go | 8 -
 integration/constraint_test.go | 238 --
 integration/consul_catalog_test.go | 725 -----
 integration/consul_test.go | 584 ----
 integration/dynamodb_test.go | 168 --
 integration/etcd3_test.go | 676 -----
 integration/eureka_test.go | 100 -
 integration/fixtures/consul/simple.toml | 18 -
 integration/fixtures/consul/simple_https.toml | 20 -
 .../fixtures/consul_catalog/simple.toml | 13 -
 integration/fixtures/dynamodb/simple.toml | 18 -
 integration/fixtures/etcd/simple.toml | 18 -
 integration/fixtures/etcd/simple_https.toml | 22 -
 integration/fixtures/eureka/simple.toml | 13 -
 integration/fixtures/mesos/simple.toml | 9 -
 integration/https_test.go | 4 +-
 integration/integration_test.go | 18 +-
 integration/mesos_test.go | 30 -
 integration/resources/compose/constraints.yml | 17 -
 integration/resources/compose/consul.yml | 25 -
 .../resources/compose/consul_catalog.yml | 22 -
 integration/resources/compose/consul_tls.yml | 14 -
 integration/resources/compose/dynamodb.yml | 16 -
 integration/resources/compose/etcd3.yml | 33 -
 integration/resources/compose/eureka.yml | 5 -
 integration/resources/compose/mesos.yml | 34 -
 old/acme/account.go | 335 ---
 old/acme/acme.go | 840 ------
 old/acme/acme_example.json | 43 -
 old/acme/acme_test.go | 807 ------
 old/acme/challenge_http_provider.go | 102 -
 old/acme/challenge_tls_provider.go | 132 -
 old/acme/localStore.go | 177 --
 old/acme/localStore_test.go | 32 -
 old/api/dashboard.go | 39 -
 old/api/debug.go | 48 -
 old/api/handler.go | 252 --
 old/cluster/datastore.go | 247 --
 old/cluster/leadership.go | 146 -
 old/cluster/store.go | 16 -
 old/configuration/configuration.go | 444 ---
 old/configuration/convert.go | 218 --
 old/configuration/entrypoints.go | 322 ---
 old/configuration/router/internal_router.go | 116 -
 old/log/logger.go | 315 ---
 .../accesslog/capture_request_reader.go | 18 -
 .../accesslog/capture_response_writer.go | 68 -
 old/middlewares/accesslog/logdata.go | 120 -
 old/middlewares/accesslog/logger.go | 334 ---
 .../accesslog/logger_formatters.go | 82 -
 .../accesslog/logger_formatters_test.go | 140 -
 old/middlewares/accesslog/logger_test.go | 644 -----
 old/middlewares/accesslog/parser.go | 54 -
 old/middlewares/accesslog/parser_test.go | 75 -
 old/middlewares/accesslog/save_backend.go | 64 -
 old/middlewares/accesslog/save_frontend.go | 51 -
 old/middlewares/accesslog/save_retries.go | 19 -
 .../accesslog/save_retries_test.go | 48 -
 old/middlewares/accesslog/save_username.go | 60 -
 old/middlewares/addPrefix.go | 35 -
 old/middlewares/addPrefix_test.go | 66 -
 old/middlewares/auth/authenticator.go | 167 --
 old/middlewares/auth/authenticator_test.go | 297 --
 old/middlewares/auth/forward.go | 157 --
 old/middlewares/auth/forward_test.go | 392 ---
 old/middlewares/auth/parser.go | 48 -
 old/middlewares/cbreaker.go | 40 -
 old/middlewares/compress.go | 33 -
 old/middlewares/compress_test.go | 248 --
 old/middlewares/empty_backend_handler.go | 30 -
 old/middlewares/empty_backend_handler_test.go | 83 -
 old/middlewares/errorpages/error_pages.go | 236 --
 .../errorpages/error_pages_test.go | 384 ---
 .../forwardedheaders/forwarded_header.go | 52 -
 .../forwardedheaders/forwarded_header_test.go | 128 -
 old/middlewares/handlerSwitcher.go | 36 -
 old/middlewares/headers.go | 71 -
 old/middlewares/headers_test.go | 118 -
 old/middlewares/ip_whitelister.go | 67 -
 old/middlewares/ip_whitelister_test.go | 92 -
 old/middlewares/recover.go | 51 -
 old/middlewares/recover_test.go | 45 -
 old/middlewares/redirect/redirect.go | 163 --
 old/middlewares/replace_path.go | 28 -
 old/middlewares/replace_path_regex.go | 40 -
 old/middlewares/replace_path_regex_test.go | 80 -
 old/middlewares/replace_path_test.go | 41 -
 old/middlewares/request_host.go | 45 -
 old/middlewares/request_host_test.go | 94 -
 old/middlewares/retry.go | 185 --
 old/middlewares/retry_test.go | 304 ---
 old/middlewares/routes.go | 28 -
 old/middlewares/secure.go | 36 -
 old/middlewares/stateful.go | 12 -
 old/middlewares/stats.go | 115 -
 old/middlewares/stripPrefix.go | 56 -
 old/middlewares/stripPrefixRegex.go | 59 -
 old/middlewares/stripPrefixRegex_test.go | 97 -
 old/middlewares/stripPrefix_test.go | 143 -
 old/middlewares/tlsClientHeaders.go | 289 --
 old/middlewares/tlsClientHeaders_test.go | 1011 -------
 old/middlewares/tracing/carrier.go | 25 -
 old/middlewares/tracing/datadog/datadog.go | 50 -
 old/middlewares/tracing/entrypoint.go | 57 -
 old/middlewares/tracing/entrypoint_test.go | 70 -
 old/middlewares/tracing/forwarder.go | 63 -
 old/middlewares/tracing/forwarder_test.go | 93 -
 old/middlewares/tracing/jaeger/jaeger.go | 73 -
 old/middlewares/tracing/jaeger/logger.go | 15 -
 old/middlewares/tracing/status_code.go | 57 -
 old/middlewares/tracing/tracing.go | 197 --
 old/middlewares/tracing/tracing_test.go | 133 -
 old/middlewares/tracing/wrapper.go | 66 -
 old/middlewares/tracing/zipkin/zipkin.go | 49 -
 old/ping/ping.go | 36 -
 old/provider/boltdb/boltdb.go | 48 -
 old/provider/consul/consul.go | 48 -
 old/provider/consulcatalog/config.go | 227 --
 old/provider/consulcatalog/config_test.go | 1358 ----------
 old/provider/consulcatalog/consul_catalog.go | 618 -----
 .../consulcatalog/consul_catalog_test.go | 862 ------
 old/provider/consulcatalog/convert_types.go | 29 -
 .../consulcatalog/convert_types_test.go | 64 -
 old/provider/dynamodb/dynamodb.go | 217 --
 old/provider/dynamodb/dynamodb_test.go | 140 -
 old/provider/ecs/builder_test.go | 80 -
 old/provider/ecs/cluster.go | 32 -
 old/provider/ecs/cluster_test.go | 145 -
 old/provider/ecs/config.go | 252 --
 old/provider/ecs/config_segment_test.go | 901 ------
 old/provider/ecs/config_test.go | 1288 ---------
 old/provider/ecs/ecs.go | 442 ---
 old/provider/ecs/ecs_test.go | 88 -
 old/provider/etcd/etcd.go | 48 -
 old/provider/eureka/config.go | 57 -
 old/provider/eureka/config_test.go | 182 --
 old/provider/eureka/eureka.go | 92 -
 old/provider/kv/filler_test.go | 203 --
 old/provider/kv/keynames.go | 132 -
 old/provider/kv/kv.go | 128 -
 old/provider/kv/kv_config.go | 730 -----
 old/provider/kv/kv_config_test.go | 2410 -----------------
 old/provider/kv/kv_mock_test.go | 124 -
 old/provider/kv/kv_test.go | 54 -
 old/provider/label/label.go | 212 --
 old/provider/label/label_test.go | 692 -----
 old/provider/label/names.go | 240 --
 old/provider/label/partial.go | 431 ---
 old/provider/label/partial_test.go | 1014 -------
 old/provider/label/segment.go | 115 -
 old/provider/label/segment_test.go | 95 -
 old/provider/mesos/config.go | 310 ---
 old/provider/mesos/config_segment_test.go | 412 ---
 old/provider/mesos/config_test.go | 1288 ---------
 old/provider/mesos/mesos.go | 173 --
 old/provider/mesos/mesos_helper_test.go | 184 --
 old/provider/mesos/mesos_test.go | 37 -
 old/provider/provider.go | 132 -
 old/provider/provider_test.go | 487 ----
 old/provider/rancher/api.go | 273 --
 old/provider/rancher/config.go | 210 --
 old/provider/rancher/config_test.go | 1209 ---------
 old/provider/rancher/metadata.go | 138 -
 old/provider/rancher/rancher.go | 80 -
 old/provider/rest/rest.go | 68 -
 old/provider/zk/zk.go | 48 -
 old/tls/certificate.go | 244 --
 old/tls/certificate_store.go | 137 -
 old/tls/generate/generate.go | 94 -
 old/tls/tls.go | 101 -
 old/types/dns_resolvers.go | 44 -
 old/types/domain_test.go | 182 --
 old/types/domains.go | 88 -
 old/types/internal_router.go | 10 -
 old/types/logs.go | 200 --
 old/types/logs_test.go | 419 ---
 old/types/types.go | 685 -----
 old/types/types_test.go | 186 --
 pkg/collector/collector.go | 3 +-
 pkg/config/static/static_config.go | 56 +-
 .../accesslog/capture_response_writer.go | 2 +-
 pkg/middlewares/accesslog/logger.go | 4 +-
 pkg/middlewares/accesslog/logger_test.go | 4 +-
 pkg/middlewares/customerrors/custom_errors.go | 2 +-
 .../middlewares/pipelining/pipelining.go | 21 +-
 .../middlewares/pipelining/pipelining_test.go | 5 +-
 pkg/provider/kubernetes/crd/client.go | 2 +-
 pkg/provider/kubernetes/ingress/client.go | 2 +-
 pkg/provider/marathon/marathon.go | 4 +-
 pkg/provider/marathon/readiness.go | 2 +-
 pkg/server/roundtripper.go | 3 +-
 pkg/server/service/service.go | 8 +-
 195 files changed, 83 insertions(+), 37524 deletions(-)
 delete mode 100644 cmd/convert/convert.go
 delete mode 100644 integration/constraint_test.go
 delete mode 100644 integration/consul_catalog_test.go
 delete mode 100644 integration/consul_test.go
 delete mode 100644 integration/dynamodb_test.go
 delete mode 100644 integration/etcd3_test.go
 delete mode 100644 integration/eureka_test.go
 delete mode 100644 integration/fixtures/consul/simple.toml
 delete mode 100644 integration/fixtures/consul/simple_https.toml
 delete mode 100644 integration/fixtures/consul_catalog/simple.toml
 delete mode 100644 integration/fixtures/dynamodb/simple.toml
 delete mode 100644 integration/fixtures/etcd/simple.toml
 delete mode 100644 integration/fixtures/etcd/simple_https.toml
 delete mode 100644 integration/fixtures/eureka/simple.toml
 delete mode 100644 integration/fixtures/mesos/simple.toml
 delete mode 100644 integration/mesos_test.go
 delete mode 100644 integration/resources/compose/constraints.yml
 delete mode 100644 integration/resources/compose/consul.yml
 delete mode 100644 integration/resources/compose/consul_catalog.yml
 delete mode 100644 integration/resources/compose/consul_tls.yml
 delete mode 100644 integration/resources/compose/dynamodb.yml
 delete mode 100644 integration/resources/compose/etcd3.yml
 delete mode 100644 integration/resources/compose/eureka.yml
 delete mode 100644 integration/resources/compose/mesos.yml
 delete mode 100644 old/acme/account.go
 delete mode 100644 old/acme/acme.go
 delete mode 100644 old/acme/acme_example.json
 delete mode 100644 old/acme/acme_test.go
 delete mode 100644 old/acme/challenge_http_provider.go
 delete mode 100644 old/acme/challenge_tls_provider.go
 delete mode 100644 old/acme/localStore.go
 delete mode 100644 old/acme/localStore_test.go
 delete mode 100644 old/api/dashboard.go
 delete mode 100644 old/api/debug.go
 delete mode 100644 old/api/handler.go
 delete mode 100644 old/cluster/datastore.go
 delete mode 100644 old/cluster/leadership.go
 delete mode 100644 old/cluster/store.go
 delete mode 100644 old/configuration/configuration.go
 delete mode 100644 old/configuration/convert.go
 delete mode 100644 old/configuration/entrypoints.go
 delete mode 100644 old/configuration/router/internal_router.go
 delete mode 100644 old/log/logger.go
 delete mode 100644 old/middlewares/accesslog/capture_request_reader.go
 delete mode 100644 old/middlewares/accesslog/capture_response_writer.go
 delete mode 100644 old/middlewares/accesslog/logdata.go
 delete mode 100644 old/middlewares/accesslog/logger.go
 delete mode 100644 old/middlewares/accesslog/logger_formatters.go
 delete mode 100644 old/middlewares/accesslog/logger_formatters_test.go
 delete mode 100644 old/middlewares/accesslog/logger_test.go
 delete mode 100644 old/middlewares/accesslog/parser.go
 delete mode 100644 old/middlewares/accesslog/parser_test.go
 delete mode 100644 old/middlewares/accesslog/save_backend.go
 delete mode 100644 old/middlewares/accesslog/save_frontend.go
 delete mode 100644 old/middlewares/accesslog/save_retries.go
 delete mode 100644 old/middlewares/accesslog/save_retries_test.go
 delete mode 100644 old/middlewares/accesslog/save_username.go
 delete mode 100644 old/middlewares/addPrefix.go
 delete mode 100644 old/middlewares/addPrefix_test.go
 delete mode 100644 old/middlewares/auth/authenticator.go
 delete mode 100644 old/middlewares/auth/authenticator_test.go
 delete mode 100644 old/middlewares/auth/forward.go
 delete mode 100644 old/middlewares/auth/forward_test.go
 delete mode 100644 old/middlewares/auth/parser.go
 delete mode 100644 old/middlewares/cbreaker.go
 delete mode 100644 old/middlewares/compress.go
 delete mode 100644 old/middlewares/compress_test.go
 delete mode 100644 old/middlewares/empty_backend_handler.go
 delete mode 100644 old/middlewares/empty_backend_handler_test.go
 delete mode 100644 old/middlewares/errorpages/error_pages.go
 delete mode 100644 old/middlewares/errorpages/error_pages_test.go
 delete mode 100644 old/middlewares/forwardedheaders/forwarded_header.go
 delete mode 100644 old/middlewares/forwardedheaders/forwarded_header_test.go
 delete mode 100644 old/middlewares/handlerSwitcher.go
 delete mode 100644 old/middlewares/headers.go
 delete mode 100644 old/middlewares/headers_test.go
 delete mode 100644 old/middlewares/ip_whitelister.go
 delete mode 100644 old/middlewares/ip_whitelister_test.go
 delete mode 100644 old/middlewares/recover.go
 delete mode 100644 old/middlewares/recover_test.go
 delete mode 100644 old/middlewares/redirect/redirect.go
 delete mode 100644 old/middlewares/replace_path.go
 delete mode 100644 old/middlewares/replace_path_regex.go
 delete mode 100644 old/middlewares/replace_path_regex_test.go
 delete mode 100644 old/middlewares/replace_path_test.go
 delete mode 100644 old/middlewares/request_host.go
 delete mode 100644 old/middlewares/request_host_test.go
 delete mode 100644 old/middlewares/retry.go
 delete mode 100644 old/middlewares/retry_test.go
 delete mode 100644 old/middlewares/routes.go
 delete mode 100644 old/middlewares/secure.go
 delete mode 100644 old/middlewares/stateful.go
 delete mode 100644 old/middlewares/stats.go
 delete mode 100644 old/middlewares/stripPrefix.go
 delete mode 100644 old/middlewares/stripPrefixRegex.go
 delete mode 100644 old/middlewares/stripPrefixRegex_test.go
 delete mode 100644 old/middlewares/stripPrefix_test.go
 delete mode 100644 old/middlewares/tlsClientHeaders.go
 delete mode 100644 old/middlewares/tlsClientHeaders_test.go
 delete mode 100644 old/middlewares/tracing/carrier.go
 delete mode 100644 old/middlewares/tracing/datadog/datadog.go
 delete mode 100644 old/middlewares/tracing/entrypoint.go
 delete mode 100644 old/middlewares/tracing/entrypoint_test.go
 delete mode 100644 old/middlewares/tracing/forwarder.go
 delete mode 100644 old/middlewares/tracing/forwarder_test.go
 delete mode 100644 old/middlewares/tracing/jaeger/jaeger.go
 delete mode 100644 old/middlewares/tracing/jaeger/logger.go
 delete mode 100644 old/middlewares/tracing/status_code.go
 delete mode 100644 old/middlewares/tracing/tracing.go
 delete mode 100644 old/middlewares/tracing/tracing_test.go
 delete mode 100644 old/middlewares/tracing/wrapper.go
 delete mode 100644 old/middlewares/tracing/zipkin/zipkin.go
 delete mode 100644 old/ping/ping.go
 delete mode 100644 old/provider/boltdb/boltdb.go
 delete mode 100644 old/provider/consul/consul.go
 delete mode 100644 old/provider/consulcatalog/config.go
 delete mode 100644 old/provider/consulcatalog/config_test.go
 delete mode 100644 old/provider/consulcatalog/consul_catalog.go
 delete mode 100644 old/provider/consulcatalog/consul_catalog_test.go
 delete mode 100644 old/provider/consulcatalog/convert_types.go
 delete mode 100644 old/provider/consulcatalog/convert_types_test.go
 delete mode 100644 old/provider/dynamodb/dynamodb.go
 delete mode 100644 old/provider/dynamodb/dynamodb_test.go
 delete mode 100644 old/provider/ecs/builder_test.go
 delete mode 100644 old/provider/ecs/cluster.go
 delete mode 100644 old/provider/ecs/cluster_test.go
 delete mode 100644 old/provider/ecs/config.go
 delete mode 100644 old/provider/ecs/config_segment_test.go
 delete mode 100644 old/provider/ecs/config_test.go
 delete mode 100644 old/provider/ecs/ecs.go
 delete mode 100644 old/provider/ecs/ecs_test.go
 delete mode 100644 old/provider/etcd/etcd.go
 delete mode 100644 old/provider/eureka/config.go
 delete mode 100644 old/provider/eureka/config_test.go
 delete mode 100644 old/provider/eureka/eureka.go
 delete mode 100644 old/provider/kv/filler_test.go
 delete mode 100644 old/provider/kv/keynames.go
 delete mode 100644 old/provider/kv/kv.go
 delete mode 100644 old/provider/kv/kv_config.go
 delete mode 100644 old/provider/kv/kv_config_test.go
 delete mode 100644 old/provider/kv/kv_mock_test.go
 delete mode 100644 old/provider/kv/kv_test.go
 delete mode 100644 old/provider/label/label.go
 delete mode 100644 old/provider/label/label_test.go
 delete mode 100644 old/provider/label/names.go
 delete mode 100644 old/provider/label/partial.go
 delete mode 100644 old/provider/label/partial_test.go
 delete mode 100644 old/provider/label/segment.go
 delete mode 100644 old/provider/label/segment_test.go
 delete mode 100644 old/provider/mesos/config.go
 delete mode 100644 old/provider/mesos/config_segment_test.go
 delete mode 100644 old/provider/mesos/config_test.go
 delete mode 100644 old/provider/mesos/mesos.go
 delete mode 100644 old/provider/mesos/mesos_helper_test.go
 delete mode 100644 old/provider/mesos/mesos_test.go
 delete mode 100644 old/provider/provider.go
 delete mode 100644 old/provider/provider_test.go
 delete mode 100644 old/provider/rancher/api.go
 delete mode 100644 old/provider/rancher/config.go
 delete mode 100644 old/provider/rancher/config_test.go
 delete mode 100644 old/provider/rancher/metadata.go
 delete mode 100644 old/provider/rancher/rancher.go
 delete mode 100644 old/provider/rest/rest.go
 delete mode 100644 old/provider/zk/zk.go
 delete mode 100644 old/tls/certificate.go
 delete mode 100644 old/tls/certificate_store.go
 delete mode 100644 old/tls/generate/generate.go
 delete mode 100644 old/tls/tls.go
 delete mode 100644 old/types/dns_resolvers.go
 delete mode 100644 old/types/domain_test.go
 delete mode 100644 old/types/domains.go
 delete mode 100644 old/types/internal_router.go
 delete mode 100644 old/types/logs.go
 delete mode 100644 old/types/logs_test.go
 delete mode 100644 old/types/types.go
 delete mode 100644 old/types/types_test.go
 rename {old =>
pkg}/middlewares/pipelining/pipelining.go (74%) rename {old => pkg}/middlewares/pipelining/pipelining_test.go (94%) diff --git a/cmd/configuration.go b/cmd/configuration.go index d6f222645..e268e94a5 100644 --- a/cmd/configuration.go +++ b/cmd/configuration.go @@ -4,19 +4,8 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/configuration" - "github.com/containous/traefik/old/middlewares/accesslog" - "github.com/containous/traefik/old/provider/boltdb" - "github.com/containous/traefik/old/provider/consul" - "github.com/containous/traefik/old/provider/consulcatalog" - "github.com/containous/traefik/old/provider/dynamodb" - "github.com/containous/traefik/old/provider/ecs" - "github.com/containous/traefik/old/provider/etcd" - "github.com/containous/traefik/old/provider/eureka" - "github.com/containous/traefik/old/provider/mesos" - "github.com/containous/traefik/old/provider/rancher" - "github.com/containous/traefik/old/provider/zk" "github.com/containous/traefik/pkg/config/static" + "github.com/containous/traefik/pkg/middlewares/accesslog" "github.com/containous/traefik/pkg/ping" "github.com/containous/traefik/pkg/provider/docker" "github.com/containous/traefik/pkg/provider/file" @@ -134,7 +123,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration { defaultMetrics := types.Metrics{ Prometheus: &types.Prometheus{ Buckets: types.Buckets{0.1, 0.3, 1.2, 5}, - EntryPoint: configuration.DefaultInternalEntryPointName, + EntryPoint: static.DefaultInternalEntryPointName, }, Datadog: &types.Datadog{ Address: "localhost:8125", @@ -167,7 +156,7 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration { // default Rest var defaultRest rest.Provider - defaultRest.EntryPoint = configuration.DefaultInternalEntryPointName + defaultRest.EntryPoint = static.DefaultInternalEntryPointName // default Marathon var defaultMarathon marathon.Provider @@ -180,91 +169,16 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration { defaultMarathon.KeepAlive = parse.Duration(10 * time.Second) defaultMarathon.DefaultRule = marathon.DefaultTemplateRule - // default Consul - var defaultConsul consul.Provider - defaultConsul.Watch = true - defaultConsul.Endpoint = "127.0.0.1:8500" - defaultConsul.Prefix = "traefik" - - // default CatalogProvider - var defaultConsulCatalog consulcatalog.Provider - defaultConsulCatalog.Endpoint = "127.0.0.1:8500" - defaultConsulCatalog.ExposedByDefault = true - defaultConsulCatalog.Prefix = "traefik" - defaultConsulCatalog.FrontEndRule = "Host:{{.ServiceName}}.{{.Domain}}" - defaultConsulCatalog.Stale = false - - // default Etcd - var defaultEtcd etcd.Provider - defaultEtcd.Watch = true - defaultEtcd.Endpoint = "127.0.0.1:2379" - defaultEtcd.Prefix = "/traefik" - - // default Zookeeper - var defaultZookeeper zk.Provider - defaultZookeeper.Watch = true - defaultZookeeper.Endpoint = "127.0.0.1:2181" - defaultZookeeper.Prefix = "traefik" - - // default Boltdb - var defaultBoltDb boltdb.Provider - defaultBoltDb.Watch = true - defaultBoltDb.Endpoint = "127.0.0.1:4001" - defaultBoltDb.Prefix = "/traefik" - // default Kubernetes var defaultKubernetes ingress.Provider defaultKubernetes.Watch = true - // default Mesos - var defaultMesos mesos.Provider - defaultMesos.Watch = true - defaultMesos.Endpoint = "http://127.0.0.1:5050" - defaultMesos.ExposedByDefault = true - defaultMesos.RefreshSeconds = 30 - defaultMesos.ZkDetectionTimeout = 30 - defaultMesos.StateTimeoutSecond = 30 - - // default ECS - var defaultECS 
ecs.Provider - defaultECS.Watch = true - defaultECS.ExposedByDefault = true - defaultECS.AutoDiscoverClusters = false - defaultECS.Clusters = ecs.Clusters{"default"} - defaultECS.RefreshSeconds = 15 - - // default Rancher - var defaultRancher rancher.Provider - defaultRancher.Watch = true - defaultRancher.ExposedByDefault = true - defaultRancher.RefreshSeconds = 15 - - // default DynamoDB - var defaultDynamoDB dynamodb.Provider - defaultDynamoDB.RefreshSeconds = 15 - defaultDynamoDB.TableName = "traefik" - defaultDynamoDB.Watch = true - - // default Eureka - var defaultEureka eureka.Provider - defaultEureka.RefreshSeconds = parse.Duration(30 * time.Second) - defaultProviders := static.Providers{ - File: &defaultFile, - Docker: &defaultDocker, - Rest: &defaultRest, - Marathon: &defaultMarathon, - Consul: &defaultConsul, - ConsulCatalog: &defaultConsulCatalog, - Etcd: &defaultEtcd, - Zookeeper: &defaultZookeeper, - Boltdb: &defaultBoltDb, - Kubernetes: &defaultKubernetes, - Mesos: &defaultMesos, - ECS: &defaultECS, - Rancher: &defaultRancher, - Eureka: &defaultEureka, - DynamoDB: &defaultDynamoDB, + File: &defaultFile, + Docker: &defaultDocker, + Rest: &defaultRest, + Marathon: &defaultMarathon, + Kubernetes: &defaultKubernetes, } return &TraefikConfiguration{ diff --git a/cmd/convert/convert.go b/cmd/convert/convert.go deleted file mode 100644 index 829e8a8b3..000000000 --- a/cmd/convert/convert.go +++ /dev/null @@ -1,151 +0,0 @@ -package main - -import ( - "os" - "strings" - - "github.com/BurntSushi/toml" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/config" - "github.com/sirupsen/logrus" -) - -var oldvalue = ` -[backends] - [backends.backend1] - [backends.backend1.servers.server1] - url = "http://127.0.0.1:9010" - weight = 1 - [backends.backend2] - [backends.backend2.servers.server1] - url = "http://127.0.0.1:9020" - weight = 1 - -[frontends] - [frontends.frontend1] - backend = "backend1" - [frontends.frontend1.routes.test_1] - rule = "Host:snitest.com" - [frontends.frontend2] - backend = "backend2" - [frontends.frontend2.routes.test_2] - rule = "Host:snitest.org" - -` - -// Temporary utility to convert dynamic conf v1 to v2 -func main() { - log.SetOutput(os.Stdout) - log.SetLevel(logrus.DebugLevel) - - oldConfig := &types.Configuration{} - _, err := toml.Decode(oldvalue, oldConfig) - if err != nil { - log.Fatal(err) - } - - newConfig := config.HTTPConfiguration{ - Routers: make(map[string]*config.Router), - Middlewares: make(map[string]*config.Middleware), - Services: make(map[string]*config.Service), - } - - for frontendName, frontend := range oldConfig.Frontends { - newConfig.Routers[replaceFrontend(frontendName)] = convertFrontend(frontend) - if frontend.PassHostHeader { - log.Warn("ignore PassHostHeader") - } - } - - for backendName, backend := range oldConfig.Backends { - newConfig.Services[replaceBackend(backendName)] = convertBackend(backend) - } - - encoder := toml.NewEncoder(os.Stdout) - err = encoder.Encode(newConfig) - if err != nil { - log.Fatal(err) - } -} - -func replaceBackend(name string) string { - return strings.Replace(name, "backend", "service", -1) -} - -func replaceFrontend(name string) string { - return strings.Replace(name, "frontend", "router", -1) -} - -func convertFrontend(frontend *types.Frontend) *config.Router { - router := &config.Router{ - EntryPoints: frontend.EntryPoints, - Middlewares: nil, - Service: replaceBackend(frontend.Backend), - Priority: frontend.Priority, - } - - if 
len(frontend.Routes) > 1 { - log.Fatal("Multiple routes") - } - - for _, route := range frontend.Routes { - router.Rule = route.Rule - } - - return router -} - -func convertBackend(backend *types.Backend) *config.Service { - service := &config.Service{ - LoadBalancer: &config.LoadBalancerService{ - Stickiness: nil, - Servers: nil, - Method: "", - HealthCheck: nil, - PassHostHeader: false, - }, - } - - if backend.Buffering != nil { - log.Warn("Buffering not implemented") - } - - if backend.CircuitBreaker != nil { - log.Warn("CircuitBreaker not implemented") - } - - if backend.MaxConn != nil { - log.Warn("MaxConn not implemented") - } - - for _, oldserver := range backend.Servers { - service.LoadBalancer.Servers = append(service.LoadBalancer.Servers, config.Server{ - URL: oldserver.URL, - Weight: oldserver.Weight, - }) - } - - if backend.LoadBalancer != nil { - service.LoadBalancer.Method = backend.LoadBalancer.Method - if backend.LoadBalancer.Stickiness != nil { - service.LoadBalancer.Stickiness = &config.Stickiness{ - CookieName: backend.LoadBalancer.Stickiness.CookieName, - } - } - - if backend.HealthCheck != nil { - service.LoadBalancer.HealthCheck = &config.HealthCheck{ - Scheme: backend.HealthCheck.Scheme, - Path: backend.HealthCheck.Path, - Port: backend.HealthCheck.Port, - Interval: backend.HealthCheck.Interval, - Timeout: backend.HealthCheck.Timeout, - Hostname: backend.HealthCheck.Hostname, - Headers: backend.HealthCheck.Headers, - } - } - } - - return service -} diff --git a/cmd/storeconfig/storeconfig.go b/cmd/storeconfig/storeconfig.go index 80d89af6d..fd7070f9f 100644 --- a/cmd/storeconfig/storeconfig.go +++ b/cmd/storeconfig/storeconfig.go @@ -5,7 +5,6 @@ import ( "fmt" stdlog "log" - "github.com/abronan/valkeyrie/store" "github.com/containous/flaeg" "github.com/containous/staert" "github.com/containous/traefik/cmd" @@ -18,6 +17,7 @@ func NewCmd(traefikConfiguration *cmd.TraefikConfiguration, traefikPointersConfi Description: `Stores the static traefik configuration into a Key-value stores. 
Traefik will not start.`, Config: traefikConfiguration, DefaultPointersConfig: traefikPointersConfiguration, + HideHelp: true, // TODO storeconfig Metadata: map[string]string{ "parseAllSources": "true", }, @@ -116,34 +116,35 @@ func Run(kv *staert.KvSource, traefikConfiguration *cmd.TraefikConfiguration) fu // TLS support is enable for Consul and Etcd backends func CreateKvSource(traefikConfiguration *cmd.TraefikConfiguration) (*staert.KvSource, error) { var kv *staert.KvSource - var kvStore store.Store + // var kvStore store.Store var err error - switch { - case traefikConfiguration.Providers.Consul != nil: - kvStore, err = traefikConfiguration.Providers.Consul.CreateStore() - kv = &staert.KvSource{ - Store: kvStore, - Prefix: traefikConfiguration.Providers.Consul.Prefix, - } - case traefikConfiguration.Providers.Etcd != nil: - kvStore, err = traefikConfiguration.Providers.Etcd.CreateStore() - kv = &staert.KvSource{ - Store: kvStore, - Prefix: traefikConfiguration.Providers.Etcd.Prefix, - } - case traefikConfiguration.Providers.Zookeeper != nil: - kvStore, err = traefikConfiguration.Providers.Zookeeper.CreateStore() - kv = &staert.KvSource{ - Store: kvStore, - Prefix: traefikConfiguration.Providers.Zookeeper.Prefix, - } - case traefikConfiguration.Providers.Boltdb != nil: - kvStore, err = traefikConfiguration.Providers.Boltdb.CreateStore() - kv = &staert.KvSource{ - Store: kvStore, - Prefix: traefikConfiguration.Providers.Boltdb.Prefix, - } - } + // TODO kv store + // switch { + // case traefikConfiguration.Providers.Consul != nil: + // kvStore, err = traefikConfiguration.Providers.Consul.CreateStore() + // kv = &staert.KvSource{ + // Store: kvStore, + // Prefix: traefikConfiguration.Providers.Consul.Prefix, + // } + // case traefikConfiguration.Providers.Etcd != nil: + // kvStore, err = traefikConfiguration.Providers.Etcd.CreateStore() + // kv = &staert.KvSource{ + // Store: kvStore, + // Prefix: traefikConfiguration.Providers.Etcd.Prefix, + // } + // case traefikConfiguration.Providers.Zookeeper != nil: + // kvStore, err = traefikConfiguration.Providers.Zookeeper.CreateStore() + // kv = &staert.KvSource{ + // Store: kvStore, + // Prefix: traefikConfiguration.Providers.Zookeeper.Prefix, + // } + // case traefikConfiguration.Providers.Boltdb != nil: + // kvStore, err = traefikConfiguration.Providers.Boltdb.CreateStore() + // kv = &staert.KvSource{ + // Store: kvStore, + // Prefix: traefikConfiguration.Providers.Boltdb.Prefix, + // } + // } return kv, err } diff --git a/cmd/traefik/traefik.go b/cmd/traefik/traefik.go index 0ac7cf095..afb0757b3 100644 --- a/cmd/traefik/traefik.go +++ b/cmd/traefik/traefik.go @@ -20,8 +20,6 @@ import ( "github.com/containous/traefik/cmd/healthcheck" "github.com/containous/traefik/cmd/storeconfig" cmdVersion "github.com/containous/traefik/cmd/version" - "github.com/containous/traefik/old/provider/ecs" - oldtypes "github.com/containous/traefik/old/types" "github.com/containous/traefik/pkg/collector" "github.com/containous/traefik/pkg/config" "github.com/containous/traefik/pkg/config/static" @@ -117,7 +115,6 @@ Complete documentation is available at https://traefik.io`, f.AddParser(reflect.TypeOf(traefiktls.FilesOrContents{}), &traefiktls.FilesOrContents{}) f.AddParser(reflect.TypeOf(types.Constraints{}), &types.Constraints{}) f.AddParser(reflect.TypeOf(k8s.Namespaces{}), &k8s.Namespaces{}) - f.AddParser(reflect.TypeOf(ecs.Clusters{}), &ecs.Clusters{}) f.AddParser(reflect.TypeOf([]types.Domain{}), &types.Domains{}) 
f.AddParser(reflect.TypeOf(types.DNSResolvers{}), &types.DNSResolvers{}) f.AddParser(reflect.TypeOf(types.Buckets{}), &types.Buckets{}) @@ -126,11 +123,6 @@ Complete documentation is available at https://traefik.io`, f.AddParser(reflect.TypeOf(types.FieldNames{}), &types.FieldNames{}) f.AddParser(reflect.TypeOf(types.FieldHeaderNames{}), &types.FieldHeaderNames{}) - // FIXME Remove with ACME - f.AddParser(reflect.TypeOf([]oldtypes.Domain{}), &oldtypes.Domains{}) - // FIXME Remove with old providers - f.AddParser(reflect.TypeOf(oldtypes.Constraints{}), &oldtypes.Constraints{}) - // add commands f.AddCommand(cmdVersion.NewCmd()) f.AddCommand(storeConfigCmd) diff --git a/integration/constraint_test.go b/integration/constraint_test.go deleted file mode 100644 index db45e7a3e..000000000 --- a/integration/constraint_test.go +++ /dev/null @@ -1,238 +0,0 @@ -package integration - -import ( - "fmt" - "net/http" - "time" - - "github.com/containous/traefik/integration/try" - "github.com/go-check/check" - "github.com/hashicorp/consul/api" - checker "github.com/vdemeester/shakers" -) - -// Constraint test suite -type ConstraintSuite struct { - BaseSuite - consulIP string - consulClient *api.Client -} - -func (s *ConstraintSuite) SetUpSuite(c *check.C) { - - s.createComposeProject(c, "constraints") - s.composeProject.Start(c) - - consul := s.composeProject.Container(c, "consul") - - s.consulIP = consul.NetworkSettings.IPAddress - config := api.DefaultConfig() - config.Address = s.consulIP + ":8500" - consulClient, err := api.NewClient(config) - if err != nil { - c.Fatalf("Error creating consul client") - } - s.consulClient = consulClient - - // Wait for consul to elect itself leader - err = try.Do(3*time.Second, func() error { - leader, errLeader := consulClient.Status().Leader() - - if errLeader != nil || len(leader) == 0 { - return fmt.Errorf("leader not found. 
%v", errLeader) - } - - return nil - }) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) registerService(name string, address string, port int, tags []string) error { - catalog := s.consulClient.Catalog() - _, err := catalog.Register( - &api.CatalogRegistration{ - Node: address, - Address: address, - Service: &api.AgentService{ - ID: name, - Service: name, - Address: address, - Port: port, - Tags: tags, - }, - }, - &api.WriteOptions{}, - ) - return err -} - -func (s *ConstraintSuite) deregisterService(name string, address string) error { - catalog := s.consulClient.Catalog() - _, err := catalog.Deregister( - &api.CatalogDeregistration{ - Node: address, - Address: address, - ServiceID: name, - }, - &api.WriteOptions{}, - ) - return err -} - -func (s *ConstraintSuite) TestMatchConstraintGlobal(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--constraints=tag==api") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) TestDoesNotMatchConstraintGlobal(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--constraints=tag==api") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) TestMatchConstraintProvider(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--consulCatalog.constraints=tag==api") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = 
try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) TestDoesNotMatchConstraintProvider(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--consulCatalog.constraints=tag==api") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) TestMatchMultipleConstraint(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--consulCatalog.constraints=tag==api", - "--constraints=tag!=us-*") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=eu-1"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *ConstraintSuite) TestDoesNotMatchMultipleConstraint(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost", - "--consulCatalog.constraints=tag==api", - "--constraints=tag!=us-*") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"traefik.tags=api", "traefik.tags=us-1"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} diff --git a/integration/consul_catalog_test.go b/integration/consul_catalog_test.go deleted file mode 100644 index f663a91fe..000000000 --- a/integration/consul_catalog_test.go +++ /dev/null @@ -1,725 +0,0 @@ -package integration - -import ( - "bytes" - "fmt" - "net/http" - "time" - - "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/old/provider/label" - "github.com/go-check/check" - 
"github.com/hashicorp/consul/api" - checker "github.com/vdemeester/shakers" -) - -// Consul catalog test suites -type ConsulCatalogSuite struct { - BaseSuite - consulIP string - consulClient *api.Client -} - -func (s *ConsulCatalogSuite) SetUpSuite(c *check.C) { - - s.createComposeProject(c, "consul_catalog") - s.composeProject.Start(c) - - consul := s.composeProject.Container(c, "consul") - s.consulIP = consul.NetworkSettings.IPAddress - config := api.DefaultConfig() - config.Address = s.consulIP + ":8500" - s.createConsulClient(config, c) - - // Wait for consul to elect itself leader - err := s.waitToElectConsulLeader() - c.Assert(err, checker.IsNil) - -} - -func (s *ConsulCatalogSuite) waitToElectConsulLeader() error { - return try.Do(15*time.Second, func() error { - leader, err := s.consulClient.Status().Leader() - - if err != nil || len(leader) == 0 { - return fmt.Errorf("leader not found. %v", err) - } - - return nil - }) -} -func (s *ConsulCatalogSuite) createConsulClient(config *api.Config, c *check.C) *api.Client { - consulClient, err := api.NewClient(config) - if err != nil { - c.Fatalf("Error creating consul client. %v", err) - } - s.consulClient = consulClient - return consulClient -} - -func (s *ConsulCatalogSuite) registerService(name string, address string, port int, tags []string) error { - catalog := s.consulClient.Catalog() - _, err := catalog.Register( - &api.CatalogRegistration{ - Node: address, - Address: address, - Service: &api.AgentService{ - ID: name, - Service: name, - Address: address, - Port: port, - Tags: tags, - }, - }, - &api.WriteOptions{}, - ) - return err -} - -func (s *ConsulCatalogSuite) registerAgentService(name string, address string, port int, tags []string, withHealthCheck bool) error { - agent := s.consulClient.Agent() - var healthCheck *api.AgentServiceCheck - if withHealthCheck { - healthCheck = &api.AgentServiceCheck{ - HTTP: "http://" + address, - Interval: "10s", - Timeout: "3s", - } - } else { - healthCheck = nil - } - return agent.ServiceRegister( - &api.AgentServiceRegistration{ - ID: address, - Tags: tags, - Name: name, - Address: address, - Port: port, - Check: healthCheck, - }, - ) -} - -func (s *ConsulCatalogSuite) registerCheck(name string, address string, port int) error { - agent := s.consulClient.Agent() - checkRegistration := &api.AgentCheckRegistration{ - ID: fmt.Sprintf("%s-%s", name, address), - Name: name, - ServiceID: address, - } - checkRegistration.HTTP = fmt.Sprintf("http://%s:%d/health", address, port) - checkRegistration.Interval = "2s" - checkRegistration.CheckID = address - return agent.CheckRegister(checkRegistration) -} - -func (s *ConsulCatalogSuite) deregisterAgentService(address string) error { - agent := s.consulClient.Agent() - return agent.ServiceDeregister(address) -} - -func (s *ConsulCatalogSuite) deregisterService(name string, address string) error { - catalog := s.consulClient.Catalog() - _, err := catalog.Deregister( - &api.CatalogDeregistration{ - Node: address, - Address: address, - ServiceID: name, - }, - &api.WriteOptions{}, - ) - return err -} - -func (s *ConsulCatalogSuite) consulEnableServiceMaintenance(name string) error { - return s.consulClient.Agent().EnableServiceMaintenance(name, fmt.Sprintf("Maintenance mode for service %s", name)) -} - -func (s *ConsulCatalogSuite) consulDisableServiceMaintenance(name string) error { - return s.consulClient.Agent().DisableServiceMaintenance(name) -} - -func (s *ConsulCatalogSuite) consulEnableNodeMaintenance() error { - return 
s.consulClient.Agent().EnableNodeMaintenance("Maintenance mode for node") -} - -func (s *ConsulCatalogSuite) consulDisableNodeMaintenance() error { - return s.consulClient.Agent().DisableNodeMaintenance() -} - -func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // TODO validate : run on 80 - // Expected a 404 as we did not configure anything - err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestSingleService(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Wait for Traefik to turn ready. - err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - s.deregisterService("test", whoami.NetworkSettings.IPAddress) - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=false", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSimpleServiceMultipleNode(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=false", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, 
[]string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - whoami2 := s.composeProject.Container(c, "whoami2") - err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{label.TraefikEnable + "=true"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestExposedByDefaultTrueSimpleServiceMultipleNode(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - whoami2 := s.composeProject.Container(c, "whoami2") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress, whoami2.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestRefreshConfigWithMultipleNodeWithoutHealthCheck(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - whoami2 := s.composeProject.Container(c, "whoami2") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - err = s.registerAgentService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"}, true) - c.Assert(err, checker.IsNil, check.Commentf("Error registering agent service")) - defer s.deregisterAgentService(whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, 
try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) - - err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress, whoami2.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) - - s.deregisterService("test", whoami2.NetworkSettings.IPAddress) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) - - err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 80, []string{"name=whoami2"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress, whoami2.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestBasicAuthSimpleService(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{ - label.TraefikFrontendAuthBasic + "=test:$2a$06$O5NksJPAcgrC9MuANkSoE.Xe9DSg7KcLLFYNr1Lj6hPcMmvgwxhme,test2:$2y$10$xP1SZ70QbZ4K2bTGKJOhpujkpcLxQcB3kEPF6XAV19IdcqsZTyDEe", - }) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusUnauthorized), try.HasBody()) - c.Assert(err, checker.IsNil) - - req.SetBasicAuth("test", "test") - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - req.SetBasicAuth("test2", "test2") - err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestRefreshConfigTagChange(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=false", - "--consulCatalog.watch=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, - []string{"name=whoami1", label.TraefikEnable + "=false", 
label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.NotNil) - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, - []string{"name=whoami1", label.TraefikEnable + "=true", label.TraefikBackendCircuitBreakerExpression + "=ResponseCodeRatio(500, 600, 0, 600) > 0.5"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, - try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestCircuitBreaker(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--retry", - "--retry.attempts=1", - "--forwardingTimeouts.dialTimeout=5s", - "--forwardingTimeouts.responseHeaderTimeout=10s", - "--consulCatalog", - "--consulCatalog.exposedByDefault=false", - "--consulCatalog.watch=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, - []string{"name=whoami1", label.TraefikEnable + "=true", label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - whoami2 := s.composeProject.Container(c, "whoami2") - err = s.registerService("test", whoami2.NetworkSettings.IPAddress, 42, - []string{"name=whoami2", label.TraefikEnable + "=true", label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami2.NetworkSettings.IPAddress) - - whoami3 := s.composeProject.Container(c, "whoami3") - err = s.registerService("test", whoami3.NetworkSettings.IPAddress, 42, - []string{"name=whoami3", label.TraefikEnable + "=true", label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - defer s.deregisterService("test", whoami3.NetworkSettings.IPAddress) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestRefreshConfigPortChange(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.exposedByDefault=false", - "--consulCatalog.watch=true", - 
"--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 81, []string{"name=whoami1", "traefik.enable=true"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusBadGateway)) - c.Assert(err, checker.IsNil) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 5*time.Second, try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1", label.TraefikEnable + "=true"}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - defer s.deregisterService("test", whoami.NetworkSettings.IPAddress) - - err = try.GetRequest("http://127.0.0.1:8080/api/providers/consul_catalog/backends", 60*time.Second, try.BodyContains(whoami.NetworkSettings.IPAddress)) - c.Assert(err, checker.IsNil) - - err = try.Request(req, 20*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestRetryWithConsulServer(c *check.C) { - // Scale consul to 0 to be able to start traefik before and test retry - s.composeProject.Scale(c, "consul", 0) - - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.watch=false", - "--consulCatalog.exposedByDefault=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Wait for Traefik to turn ready. 
- err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - // Request should fail - err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Scale consul to 1 - s.composeProject.Scale(c, "consul", 1) - err = s.waitToElectConsulLeader() - c.Assert(err, checker.IsNil) - - whoami := s.composeProject.Container(c, "whoami1") - // Register service - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, []string{}) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - // Provider consul catalog should be present - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("consul_catalog")) - c.Assert(err, checker.IsNil) - - // Should be ok - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestServiceWithMultipleHealthCheck(c *check.C) { - // Scale consul to 0 to be able to start traefik before and test retry - s.composeProject.Scale(c, "consul", 0) - - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.watch=false", - "--consulCatalog.exposedByDefault=true", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Wait for Traefik to turn ready. - err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - // Request should fail - err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Scale consul to 1 - s.composeProject.Scale(c, "consul", 1) - err = s.waitToElectConsulLeader() - c.Assert(err, checker.IsNil) - - whoami := s.composeProject.Container(c, "whoami1") - // Register service - err = s.registerAgentService("test", whoami.NetworkSettings.IPAddress, 80, []string{"name=whoami1"}, true) - c.Assert(err, checker.IsNil, check.Commentf("Error registering agent service")) - defer s.deregisterAgentService(whoami.NetworkSettings.IPAddress) - - // Register one healthcheck - err = s.registerCheck("test", whoami.NetworkSettings.IPAddress, 80) - c.Assert(err, checker.IsNil, check.Commentf("Error registering check")) - - // Provider consul catalog should be present - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("consul_catalog")) - c.Assert(err, checker.IsNil) - - // Should be ok - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Change health value of service to critical - reqHealth, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s:80/health", whoami.NetworkSettings.IPAddress), bytes.NewBuffer([]byte("500"))) - c.Assert(err, checker.IsNil) - reqHealth.Host = "test.consul.localhost" - - err = try.Request(reqHealth, 10*time.Second, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, 
checker.IsNil) - - // Should be a 404 - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Change health value of service to passing - reqHealth, err = http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s:80/health", whoami.NetworkSettings.IPAddress), bytes.NewBuffer([]byte("200"))) - c.Assert(err, checker.IsNil) - err = try.Request(reqHealth, 10*time.Second, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) - - // Should be a 200 - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestMaintenanceMode(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Wait for Traefik to turn ready. - err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerAgentService("test", whoami.NetworkSettings.IPAddress, 80, []string{}, false) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Enable service maintenance mode - err = s.consulEnableServiceMaintenance(whoami.NetworkSettings.IPAddress) - c.Assert(err, checker.IsNil) - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Disable service maintenance mode - err = s.consulDisableServiceMaintenance(whoami.NetworkSettings.IPAddress) - c.Assert(err, checker.IsNil) - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Enable node maintenance mode - err = s.consulEnableNodeMaintenance() - c.Assert(err, checker.IsNil) - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody()) - c.Assert(err, checker.IsNil) - - // Disable node maintenance mode - err = s.consulDisableNodeMaintenance() - c.Assert(err, checker.IsNil) - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulCatalogSuite) TestMultipleFrontendRule(c *check.C) { - cmd, display := s.traefikCmd( - withConfigFile("fixtures/consul_catalog/simple.toml"), - "--consulCatalog", - "--consulCatalog.endpoint="+s.consulIP+":8500", - "--consulCatalog.domain=consul.localhost") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Wait for Traefik to turn ready. 
- err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - whoami := s.composeProject.Container(c, "whoami1") - - err = s.registerService("test", whoami.NetworkSettings.IPAddress, 80, - []string{ - "traefik.frontends.service1.rule=Host:whoami1.consul.localhost", - "traefik.frontends.service2.rule=Host:whoami2.consul.localhost", - }) - c.Assert(err, checker.IsNil, check.Commentf("Error registering service")) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.consul.localhost" - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "whoami1.consul.localhost" - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "whoami2.consul.localhost" - - err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody()) - c.Assert(err, checker.IsNil) -} diff --git a/integration/consul_test.go b/integration/consul_test.go deleted file mode 100644 index 702e9bd68..000000000 --- a/integration/consul_test.go +++ /dev/null @@ -1,584 +0,0 @@ -package integration - -import ( - "context" - "crypto/tls" - "fmt" - "io/ioutil" - "net/http" - "os" - "sync" - "time" - - "github.com/abronan/valkeyrie" - "github.com/abronan/valkeyrie/store" - "github.com/abronan/valkeyrie/store/consul" - "github.com/containous/staert" - "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/old/cluster" - "github.com/go-check/check" - checker "github.com/vdemeester/shakers" -) - -// Consul test suites (using libcompose) -type ConsulSuite struct { - BaseSuite - kv store.Store -} - -func (s *ConsulSuite) setupConsul(c *check.C) { - s.createComposeProject(c, "consul") - s.composeProject.Start(c) - - consul.Register() - kv, err := valkeyrie.NewStore( - store.CONSUL, - []string{s.composeProject.Container(c, "consul").NetworkSettings.IPAddress + ":8500"}, - &store.Config{ - ConnectionTimeout: 10 * time.Second, - }, - ) - if err != nil { - c.Fatal("Cannot create store consul") - } - s.kv = kv - - // wait for consul - err = try.Do(60*time.Second, try.KVExists(kv, "test")) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulSuite) TearDownTest(c *check.C) { - // shutdown and delete compose project - if s.composeProject != nil { - s.composeProject.Stop(c) - } -} - -func (s *ConsulSuite) TearDownSuite(c *check.C) {} - -func (s *ConsulSuite) TestSimpleConfiguration(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost}) - defer os.Remove(file) - - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // Expected a 404 as we did not configure anything - err = try.GetRequest("http://127.0.0.1:8000/", 1*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulSuite) TestNominalConfiguration(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, 
"consul").NetworkSettings.IPAddress - file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulHost string }{consulHost}) - defer os.Remove(file) - - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress - whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress - whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress - whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress - - backend1 := map[string]string{ - "traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80", - "traefik/backends/backend1/servers/server1/weight": "10", - "traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80", - "traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "traefik/backends/backend2/loadbalancer/method": "drr", - "traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80", - "traefik/backends/backend2/servers/server1/weight": "1", - "traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80", - "traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "traefik/frontends/frontend1/backend": "backend2", - "traefik/frontends/frontend1/entrypoints": "http", - "traefik/frontends/frontend1/priority": "1", - "traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost", - } - frontend2 := map[string]string{ - "traefik/frontends/frontend2/backend": "backend1", - "traefik/frontends/frontend2/entrypoints": "http", - "traefik/frontends/frontend2/priority": "10", - "traefik/frontends/frontend2/routes/test_2/rule": "Path:/test", - } - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for consul - err = try.Do(60*time.Second, try.KVExists(s.kv, "traefik/frontends/frontend2/routes/test_2/rule")) - c.Assert(err, checker.IsNil) - - // wait for traefik - err = try.GetRequest("http://127.0.0.1:8081/api/providers", 60*time.Second, try.BodyContains("Path:/test")) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.localhost" - - err = try.Request(req, 500*time.Millisecond, - try.StatusCodeIs(http.StatusOK), - try.BodyContainsOr(whoami3IP, whoami4IP)) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/test", nil) - c.Assert(err, checker.IsNil) - - err = try.Request(req, 500*time.Millisecond, - try.StatusCodeIs(http.StatusOK), - try.BodyContainsOr(whoami1IP, whoami2IP)) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/test2", nil) - c.Assert(err, checker.IsNil) - err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) - - req, err = 
http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test2.localhost" - err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulSuite) TestGlobalConfiguration(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - err := s.kv.Put("traefik/entrypoints/http/address", []byte(":8001"), nil) - c.Assert(err, checker.IsNil) - - // wait for consul - err = try.Do(60*time.Second, try.KVExists(s.kv, "traefik/entrypoints/http/address")) - c.Assert(err, checker.IsNil) - - // start traefik - cmd, display := s.traefikCmd( - withConfigFile("fixtures/simple_web.toml"), - "--consul", - "--consul.endpoint="+consulHost+":8500") - defer display(c) - - err = cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress - whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress - whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress - whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress - - backend1 := map[string]string{ - "traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80", - "traefik/backends/backend1/servers/server1/weight": "10", - "traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80", - "traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "traefik/backends/backend2/loadbalancer/method": "drr", - "traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80", - "traefik/backends/backend2/servers/server1/weight": "1", - "traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80", - "traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "traefik/frontends/frontend1/backend": "backend2", - "traefik/frontends/frontend1/entrypoints": "http", - "traefik/frontends/frontend1/priority": "1", - "traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost", - } - frontend2 := map[string]string{ - "traefik/frontends/frontend2/backend": "backend1", - "traefik/frontends/frontend2/entrypoints": "http", - "traefik/frontends/frontend2/priority": "10", - "traefik/frontends/frontend2/routes/test_2/rule": "Path:/test", - } - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for consul - err = try.Do(60*time.Second, try.KVExists(s.kv, "traefik/frontends/frontend2/routes/test_2/rule")) - c.Assert(err, checker.IsNil) - - // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Path:/test")) - c.Assert(err, checker.IsNil) - - // check - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8001/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.localhost" - - err = try.Request(req, 500*time.Millisecond, 
try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *ConsulSuite) TestCommandStoreConfig(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - - cmd, display := s.traefikCmd( - "storeconfig", - withConfigFile("fixtures/simple_web.toml"), - "--consul.endpoint="+consulHost+":8500") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - - // wait for traefik finish without error - err = cmd.Wait() - c.Assert(err, checker.IsNil) - - expectedData := map[string]string{ - "/traefik/loglevel": "DEBUG", - "/traefik/defaultentrypoints/0": "http", - "/traefik/entrypoints/http/address": ":8000", - "/traefik/api/entrypoint": "traefik", - "/traefik/consul/endpoint": consulHost + ":8500", - } - - for key, value := range expectedData { - var p *store.KVPair - err = try.Do(60*time.Second, func() error { - p, err = s.kv.Get(key, nil) - return err - }) - c.Assert(err, checker.IsNil) - - c.Assert(string(p.Value), checker.Equals, value) - } -} - -func (s *ConsulSuite) TestCommandStoreConfigWithFile(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - - cmd, display := s.traefikCmd( - "storeconfig", - withConfigFile("fixtures/simple_default.toml"), - "--consul.endpoint="+consulHost+":8500", - "--file.filename=fixtures/file/dir/simple1.toml") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - - // wait for traefik finish without error - err = cmd.Wait() - c.Assert(err, checker.IsNil) - - expectedData := map[string]string{ - "/traefik/backends/backend1/servers/server1/url": "http://172.17.0.2:80", - "/traefik/frontends/frontend1/backend": "backend1", - "/traefik/frontends/frontend1/routes/test_1/rule": "Path:/test1", - } - - for key, value := range expectedData { - var p *store.KVPair - err = try.Do(10*time.Second, func() error { - p, err = s.kv.Get(key, nil) - return err - }) - c.Assert(err, checker.IsNil) - c.Assert(string(p.Value), checker.Equals, value) - } - - checkNotExistsMap := []string{ - "/traefik/file", - } - - for _, value := range checkNotExistsMap { - err = try.Do(10*time.Second, func() error { - if exists, err := s.kv.Exists(value, nil); err == nil && exists { - return fmt.Errorf("%s key is not suppose to exist in KV", value) - } - return nil - }) - c.Assert(err, checker.IsNil) - } -} - -type TestStruct struct { - String string - Int int -} - -func (s *ConsulSuite) TestDatastore(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - kvSource, err := staert.NewKvSource(store.CONSUL, []string{consulHost + ":8500"}, &store.Config{ - ConnectionTimeout: 10 * time.Second, - }, "traefik") - c.Assert(err, checker.IsNil) - - ctx := context.Background() - datastore1, err := cluster.NewDataStore(ctx, *kvSource, &TestStruct{}, nil) - c.Assert(err, checker.IsNil) - datastore2, err := cluster.NewDataStore(ctx, *kvSource, &TestStruct{}, nil) - c.Assert(err, checker.IsNil) - - setter1, _, err := datastore1.Begin() - c.Assert(err, checker.IsNil) - err = setter1.Commit(&TestStruct{ - String: "foo", - Int: 1, - }) - c.Assert(err, checker.IsNil) - - err = try.Do(3*time.Second, datastoreContains(datastore1, "foo")) - c.Assert(err, checker.IsNil) - - err = try.Do(3*time.Second, datastoreContains(datastore2, "foo")) - c.Assert(err, checker.IsNil) - - setter2, _, err := datastore2.Begin() - c.Assert(err, checker.IsNil) - err = setter2.Commit(&TestStruct{ - String: "bar", 
- Int: 2, - }) - c.Assert(err, checker.IsNil) - - err = try.Do(3*time.Second, datastoreContains(datastore1, "bar")) - c.Assert(err, checker.IsNil) - - err = try.Do(3*time.Second, datastoreContains(datastore2, "bar")) - c.Assert(err, checker.IsNil) - - wg := &sync.WaitGroup{} - wg.Add(4) - go func() { - for i := 0; i < 100; i++ { - setter1, _, err := datastore1.Begin() - c.Assert(err, checker.IsNil) - err = setter1.Commit(&TestStruct{ - String: "datastore1", - Int: i, - }) - c.Assert(err, checker.IsNil) - } - wg.Done() - }() - go func() { - for i := 0; i < 100; i++ { - setter2, _, err := datastore2.Begin() - c.Assert(err, checker.IsNil) - err = setter2.Commit(&TestStruct{ - String: "datastore2", - Int: i, - }) - c.Assert(err, checker.IsNil) - } - wg.Done() - }() - go func() { - for i := 0; i < 100; i++ { - test1 := datastore1.Get().(*TestStruct) - c.Assert(test1, checker.NotNil) - } - wg.Done() - }() - go func() { - for i := 0; i < 100; i++ { - test2 := datastore2.Get().(*TestStruct) - c.Assert(test2, checker.NotNil) - } - wg.Done() - }() - wg.Wait() -} - -func datastoreContains(datastore *cluster.Datastore, expectedValue string) func() error { - return func() error { - kvStruct := datastore.Get().(*TestStruct) - if kvStruct.String != expectedValue { - return fmt.Errorf("got %s, wanted %s", kvStruct.String, expectedValue) - } - return nil - } -} - -func (s *ConsulSuite) TestSNIDynamicTlsConfig(c *check.C) { - s.setupConsul(c) - consulHost := s.composeProject.Container(c, "consul").NetworkSettings.IPAddress - // start Traefik - file := s.adaptFile(c, "fixtures/consul/simple_https.toml", struct{ ConsulHost string }{consulHost}) - defer os.Remove(file) - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // prepare to config - whoami1IP := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress - whoami2IP := s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress - whoami3IP := s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress - whoami4IP := s.composeProject.Container(c, "whoami4").NetworkSettings.IPAddress - - snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert") - c.Assert(err, checker.IsNil) - snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key") - c.Assert(err, checker.IsNil) - snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert") - c.Assert(err, checker.IsNil) - snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key") - c.Assert(err, checker.IsNil) - - backend1 := map[string]string{ - "traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "traefik/backends/backend1/servers/server1/url": "http://" + whoami1IP + ":80", - "traefik/backends/backend1/servers/server1/weight": "1", - "traefik/backends/backend1/servers/server2/url": "http://" + whoami2IP + ":80", - "traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "traefik/backends/backend2/loadbalancer/method": "drr", - "traefik/backends/backend2/servers/server1/url": "http://" + whoami3IP + ":80", - "traefik/backends/backend2/servers/server1/weight": "1", - "traefik/backends/backend2/servers/server2/url": "http://" + whoami4IP + ":80", - "traefik/backends/backend2/servers/server2/weight": "1", - } - frontend1 := map[string]string{ - "traefik/frontends/frontend1/backend": "backend2", - "traefik/frontends/frontend1/entrypoints": "https", - 
"traefik/frontends/frontend1/priority": "1", - "traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com", - } - - frontend2 := map[string]string{ - "traefik/frontends/frontend2/backend": "backend1", - "traefik/frontends/frontend2/entrypoints": "https", - "traefik/frontends/frontend2/priority": "10", - "traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org", - } - - tlsconfigure1 := map[string]string{ - "traefik/tls/snitestcom/entrypoints": "https", - "traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey), - "traefik/tls/snitestcom/certificate/certfile": string(snitestComCert), - } - - tlsconfigure2 := map[string]string{ - "traefik/tls/snitestorg/entrypoints": "https", - "traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey), - "traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert), - } - - // config backends,frontends and first tls keypair - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range tlsconfigure1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - tr1 := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.com", - }, - } - - tr2 := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.org", - }, - } - - // wait for consul - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Get("traefik/tls/snitestcom/certificate/keyfile", nil) - return err - }) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr1.TLSClientConfig.ServerName - req.Header.Set("Host", tr1.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr1, try.HasCn(tr1.TLSClientConfig.ServerName)) - c.Assert(err, checker.IsNil) - - // now we configure the second keypair in consul and the request for host "snitest.org" will use the second keypair - for key, value := range tlsconfigure2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for consul - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Get("traefik/tls/snitestorg/certificate/keyfile", nil) - return err - }) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr2.TLSClientConfig.ServerName - req.Header.Set("Host", tr2.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr2, try.HasCn(tr2.TLSClientConfig.ServerName)) - c.Assert(err, checker.IsNil) -} diff --git a/integration/dynamodb_test.go b/integration/dynamodb_test.go deleted file mode 100644 index fc376e285..000000000 --- a/integration/dynamodb_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package integration - -import ( - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/session" - 
"github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/old/types" - "github.com/go-check/check" - checker "github.com/vdemeester/shakers" -) - -type DynamoDBSuite struct { - BaseSuite -} - -type DynamoDBItem struct { - ID string `dynamodbav:"id"` - Name string `dynamodbav:"name"` -} - -type DynamoDBBackendItem struct { - DynamoDBItem - Backend types.Backend `dynamodbav:"backend"` -} - -type DynamoDBFrontendItem struct { - DynamoDBItem - Frontend types.Frontend `dynamodbav:"frontend"` -} - -func (s *DynamoDBSuite) SetUpSuite(c *check.C) { - s.createComposeProject(c, "dynamodb") - s.composeProject.Start(c) - dynamoURL := "http://" + s.composeProject.Container(c, "dynamo").NetworkSettings.IPAddress + ":8000" - config := &aws.Config{ - Region: aws.String("us-east-1"), - Credentials: credentials.NewStaticCredentials("id", "secret", ""), - Endpoint: aws.String(dynamoURL), - } - var sess *session.Session - err := try.Do(60*time.Second, func() error { - var err error - sess, err = session.NewSession(config) - return err - }) - c.Assert(err, checker.IsNil) - svc := dynamodb.New(sess) - - // create dynamodb table - params := &dynamodb.CreateTableInput{ - AttributeDefinitions: []*dynamodb.AttributeDefinition{ - { - AttributeName: aws.String("id"), - AttributeType: aws.String("S"), - }, - }, - KeySchema: []*dynamodb.KeySchemaElement{ - { - AttributeName: aws.String("id"), - KeyType: aws.String("HASH"), - }, - }, - ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ - ReadCapacityUnits: aws.Int64(1), - WriteCapacityUnits: aws.Int64(1), - }, - TableName: aws.String("traefik"), - } - _, err = svc.CreateTable(params) - if err != nil { - c.Error(err) - return - } - - // load config into dynamodb - whoami1 := "http://" + s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress + ":80" - whoami2 := "http://" + s.composeProject.Container(c, "whoami2").NetworkSettings.IPAddress + ":80" - whoami3 := "http://" + s.composeProject.Container(c, "whoami3").NetworkSettings.IPAddress + ":80" - - backend := DynamoDBBackendItem{ - Backend: types.Backend{ - Servers: map[string]types.Server{ - "whoami1": { - URL: whoami1, - }, - "whoami2": { - URL: whoami2, - }, - "whoami3": { - URL: whoami3, - }, - }, - }, - DynamoDBItem: DynamoDBItem{ - ID: "whoami_backend", - Name: "whoami", - }, - } - - frontend := DynamoDBFrontendItem{ - Frontend: types.Frontend{ - EntryPoints: []string{ - "http", - }, - Backend: "whoami", - Routes: map[string]types.Route{ - "hostRule": { - Rule: "Host:test.traefik.io", - }, - }, - }, - DynamoDBItem: DynamoDBItem{ - ID: "whoami_frontend", - Name: "whoami", - }, - } - backendAttributeValue, err := dynamodbattribute.MarshalMap(backend) - c.Assert(err, checker.IsNil) - frontendAttributeValue, err := dynamodbattribute.MarshalMap(frontend) - c.Assert(err, checker.IsNil) - putParams := &dynamodb.PutItemInput{ - Item: backendAttributeValue, - TableName: aws.String("traefik"), - } - _, err = svc.PutItem(putParams) - c.Assert(err, checker.IsNil) - - putParams = &dynamodb.PutItemInput{ - Item: frontendAttributeValue, - TableName: aws.String("traefik"), - } - _, err = svc.PutItem(putParams) - c.Assert(err, checker.IsNil) -} - -func (s *DynamoDBSuite) TestSimpleConfiguration(c *check.C) { - dynamoURL := "http://" + s.composeProject.Container(c, "dynamo").NetworkSettings.IPAddress + ":8000" - file := s.adaptFile(c, "fixtures/dynamodb/simple.toml", struct{ 
DynamoURL string }{dynamoURL}) - defer os.Remove(file) - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - err = try.GetRequest("http://127.0.0.1:8081/api/providers", 120*time.Second, try.BodyContains("Host:test.traefik.io")) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.traefik.io" - - err = try.Request(req, 200*time.Millisecond, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *DynamoDBSuite) TearDownSuite(c *check.C) { - if s.composeProject != nil { - s.composeProject.Stop(c) - } -} diff --git a/integration/etcd3_test.go b/integration/etcd3_test.go deleted file mode 100644 index 856109aeb..000000000 --- a/integration/etcd3_test.go +++ /dev/null @@ -1,676 +0,0 @@ -package integration - -import ( - "crypto/tls" - "io/ioutil" - "net/http" - "os" - "strings" - "time" - - "github.com/abronan/valkeyrie" - "github.com/abronan/valkeyrie/store" - etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3" - "github.com/containous/traefik/integration/try" - "github.com/go-check/check" - checker "github.com/vdemeester/shakers" -) - -const ( - traefikEtcdURL = "http://127.0.0.1:8000/" - traefikWebEtcdURL = "http://127.0.0.1:8081/" -) - -var ( - ipEtcd string - ipWhoami01 string - ipWhoami02 string - ipWhoami03 string - ipWhoami04 string -) - -// Etcd test suites (using libcompose) -type Etcd3Suite struct { - BaseSuite - kv store.Store -} - -func (s *Etcd3Suite) getIPAddress(c *check.C, service, defaultIP string) string { - var ip string - for _, value := range s.composeProject.Container(c, service).NetworkSettings.Networks { - if len(value.IPAddress) > 0 { - ip = value.IPAddress - break - } - } - - if len(ip) == 0 { - return defaultIP - } - - return ip -} - -func (s *Etcd3Suite) SetUpSuite(c *check.C) { - s.createComposeProject(c, "etcd3") - s.composeProject.Start(c) - - ipEtcd = s.getIPAddress(c, "etcd", "172.18.0.2") - ipWhoami01 = s.getIPAddress(c, "whoami1", "172.18.0.3") - ipWhoami02 = s.getIPAddress(c, "whoami2", "172.18.0.4") - ipWhoami03 = s.getIPAddress(c, "whoami3", "172.18.0.5") - ipWhoami04 = s.getIPAddress(c, "whoami4", "172.18.0.6") - - etcdv3.Register() - url := ipEtcd + ":2379" - kv, err := valkeyrie.NewStore( - store.ETCDV3, - []string{url}, - &store.Config{ - ConnectionTimeout: 30 * time.Second, - }, - ) - if err != nil { - c.Fatalf("Cannot create store etcd %v", err) - } - s.kv = kv - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := kv.Exists("test", nil) - return err - }) - c.Assert(err, checker.IsNil) -} - -func (s *Etcd3Suite) TearDownTest(c *check.C) { - // Delete all Traefik keys from ETCD - _ = s.kv.DeleteTree("/traefik") -} - -func (s *Etcd3Suite) TearDownSuite(c *check.C) { - // shutdown and delete compose project - if s.composeProject != nil { - s.composeProject.Stop(c) - } -} - -func (s *Etcd3Suite) TestSimpleConfiguration(c *check.C) { - file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct { - EtcdHost string - }{ - ipEtcd, - }) - defer os.Remove(file) - - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // TODO validate : run on 80 - // Expected a 404 as we did not configure anything - err = try.GetRequest(traefikEtcdURL, 1*time.Second, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, 
checker.IsNil) -} - -func (s *Etcd3Suite) TestNominalConfiguration(c *check.C) { - file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct { - EtcdHost string - }{ - ipEtcd, - }) - defer os.Remove(file) - - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - backend1 := map[string]string{ - "/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80", - "/traefik/backends/backend1/servers/server1/weight": "10", - "/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80", - "/traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "/traefik/backends/backend2/loadbalancer/method": "drr", - "/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80", - "/traefik/backends/backend2/servers/server1/weight": "1", - "/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80", - "/traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "/traefik/frontends/frontend1/backend": "backend2", - "/traefik/frontends/frontend1/entrypoints": "http", - "/traefik/frontends/frontend1/priority": "1", - "/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost", - } - frontend2 := map[string]string{ - "/traefik/frontends/frontend2/backend": "backend1", - "/traefik/frontends/frontend2/entrypoints": "http", - "/traefik/frontends/frontend2/priority": "10", - "/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test", - } - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil) - return err - }) - c.Assert(err, checker.IsNil) - - // wait for traefik - err = try.GetRequest(traefikWebEtcdURL+"api/providers", 60*time.Second, try.BodyContains("Path:/test")) - c.Assert(err, checker.IsNil) - - client := &http.Client{} - req, err := http.NewRequest(http.MethodGet, traefikEtcdURL, nil) - c.Assert(err, checker.IsNil) - req.Host = "test.localhost" - response, err := client.Do(req) - - c.Assert(err, checker.IsNil) - c.Assert(response.StatusCode, checker.Equals, http.StatusOK) - - body, err := ioutil.ReadAll(response.Body) - c.Assert(err, checker.IsNil) - if !strings.Contains(string(body), ipWhoami03) && - !strings.Contains(string(body), ipWhoami04) { - c.Fail() - } - - req, err = http.NewRequest(http.MethodGet, traefikEtcdURL+"test", nil) - c.Assert(err, checker.IsNil) - response, err = client.Do(req) - - c.Assert(err, checker.IsNil) - c.Assert(response.StatusCode, checker.Equals, http.StatusOK) - - body, err = ioutil.ReadAll(response.Body) - c.Assert(err, checker.IsNil) - if !strings.Contains(string(body), ipWhoami01) && - !strings.Contains(string(body), ipWhoami02) { - c.Fail() - } - - req, err = http.NewRequest(http.MethodGet, traefikEtcdURL+"test2", nil) - c.Assert(err, checker.IsNil) - req.Host = "test2.localhost" 
- resp, err := client.Do(req) - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) - - resp, err = http.Get(traefikEtcdURL) - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) -} - -func (s *Etcd3Suite) TestGlobalConfiguration(c *check.C) { - err := s.kv.Put("/traefik/entrypoints/http/address", []byte(":8001"), nil) - c.Assert(err, checker.IsNil) - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Exists("/traefik/entrypoints/http/address", nil) - return err - }) - c.Assert(err, checker.IsNil) - - // start traefik - cmd, display := s.traefikCmd( - withConfigFile("fixtures/simple_web.toml"), - "--etcd", - "--etcd.endpoint="+ipEtcd+":4001") - defer display(c) - err = cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - backend1 := map[string]string{ - "/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80", - "/traefik/backends/backend1/servers/server1/weight": "10", - "/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80", - "/traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "/traefik/backends/backend2/loadbalancer/method": "drr", - "/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80", - "/traefik/backends/backend2/servers/server1/weight": "1", - "/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80", - "/traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "/traefik/frontends/frontend1/backend": "backend2", - "/traefik/frontends/frontend1/entrypoints": "http", - "/traefik/frontends/frontend1/priority": "1", - "/traefik/frontends/frontend1/routes/test_1/rule": "Host:test.localhost", - } - frontend2 := map[string]string{ - "/traefik/frontends/frontend2/backend": "backend1", - "/traefik/frontends/frontend2/entrypoints": "http", - "/traefik/frontends/frontend2/priority": "10", - "/traefik/frontends/frontend2/routes/test_2/rule": "Path:/test", - } - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Exists("/traefik/frontends/frontend2/routes/test_2/rule", nil) - return err - }) - c.Assert(err, checker.IsNil) - - // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Path:/test")) - c.Assert(err, checker.IsNil) - - // check - req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8001/", nil) - c.Assert(err, checker.IsNil) - req.Host = "test.localhost" - - err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) -} - -func (s *Etcd3Suite) TestCertificatesContentWithSNIConfigHandshake(c *check.C) { - // start traefik - cmd, display := s.traefikCmd( - withConfigFile("fixtures/simple_web.toml"), - "--etcd", - "--etcd.endpoint="+ipEtcd+":4001") - defer display(c) - - // Copy the 
contents of the certificate files into ETCD - snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert") - c.Assert(err, checker.IsNil) - snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key") - c.Assert(err, checker.IsNil) - snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert") - c.Assert(err, checker.IsNil) - snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key") - c.Assert(err, checker.IsNil) - - globalConfig := map[string]string{ - "/traefik/entrypoints/https/address": ":4443", - "/traefik/entrypoints/https/tls/certificates/0/certfile": string(snitestComCert), - "/traefik/entrypoints/https/tls/certificates/0/keyfile": string(snitestComKey), - "/traefik/entrypoints/https/tls/certificates/1/certfile": string(snitestOrgCert), - "/traefik/entrypoints/https/tls/certificates/1/keyfile": string(snitestOrgKey), - "/traefik/defaultentrypoints/0": "https", - } - - backend1 := map[string]string{ - "/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80", - "/traefik/backends/backend1/servers/server1/weight": "10", - "/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80", - "/traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "/traefik/backends/backend2/loadbalancer/method": "drr", - "/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80", - "/traefik/backends/backend2/servers/server1/weight": "1", - "/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80", - "/traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "/traefik/frontends/frontend1/backend": "backend2", - "/traefik/frontends/frontend1/entrypoints": "http", - "/traefik/frontends/frontend1/priority": "1", - "/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com", - } - frontend2 := map[string]string{ - "/traefik/frontends/frontend2/backend": "backend1", - "/traefik/frontends/frontend2/entrypoints": "http", - "/traefik/frontends/frontend2/priority": "10", - "/traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org", - } - for key, value := range globalConfig { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for etcd - err = try.Do(60*time.Second, try.KVExists(s.kv, "/traefik/frontends/frontend1/backend")) - c.Assert(err, checker.IsNil) - - err = cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:snitest.org")) - c.Assert(err, checker.IsNil) - - // check - tlsConfig := &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.com", - } - conn, err := tls.Dial("tcp", "127.0.0.1:4443", tlsConfig) - c.Assert(err, checker.IsNil, check.Commentf("failed to connect to server")) - - defer conn.Close() - err = conn.Handshake() - c.Assert(err, 
checker.IsNil, check.Commentf("TLS handshake error")) - - cs := conn.ConnectionState() - err = cs.PeerCertificates[0].VerifyHostname("snitest.com") - c.Assert(err, checker.IsNil, check.Commentf("certificate did not match SNI servername")) -} - -func (s *Etcd3Suite) TestCommandStoreConfig(c *check.C) { - cmd, display := s.traefikCmd( - "storeconfig", - withConfigFile("fixtures/simple_web.toml"), - "--etcd.endpoint="+ipEtcd+":4001") - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - - // wait for traefik finish without error - err = cmd.Wait() - c.Assert(err, checker.IsNil) - - // CHECK - checkmap := map[string]string{ - "/traefik/loglevel": "DEBUG", - "/traefik/defaultentrypoints/0": "http", - "/traefik/entrypoints/http/address": ":8000", - "/traefik/api/entrypoint": "traefik", - "/traefik/etcd/endpoint": ipEtcd + ":4001", - } - - for key, value := range checkmap { - var p *store.KVPair - err = try.Do(60*time.Second, func() error { - p, err = s.kv.Get(key, nil) - return err - }) - c.Assert(err, checker.IsNil) - - c.Assert(string(p.Value), checker.Equals, value) - } -} - -func (s *Etcd3Suite) TestSNIDynamicTlsConfig(c *check.C) { - // start Traefik - cmd, display := s.traefikCmd( - withConfigFile("fixtures/etcd/simple_https.toml"), - "--etcd", - "--etcd.endpoint="+ipEtcd+":4001") - defer display(c) - - snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert") - c.Assert(err, checker.IsNil) - snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key") - c.Assert(err, checker.IsNil) - snitestOrgCert, err := ioutil.ReadFile("fixtures/https/snitest.org.cert") - c.Assert(err, checker.IsNil) - snitestOrgKey, err := ioutil.ReadFile("fixtures/https/snitest.org.key") - c.Assert(err, checker.IsNil) - - backend1 := map[string]string{ - "/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80", - "/traefik/backends/backend1/servers/server1/weight": "10", - "/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80", - "/traefik/backends/backend1/servers/server2/weight": "1", - } - backend2 := map[string]string{ - "/traefik/backends/backend2/loadbalancer/method": "drr", - "/traefik/backends/backend2/servers/server1/url": "http://" + ipWhoami03 + ":80", - "/traefik/backends/backend2/servers/server1/weight": "1", - "/traefik/backends/backend2/servers/server2/url": "http://" + ipWhoami04 + ":80", - "/traefik/backends/backend2/servers/server2/weight": "2", - } - frontend1 := map[string]string{ - "/traefik/frontends/frontend1/backend": "backend2", - "/traefik/frontends/frontend1/entrypoints": "https", - "/traefik/frontends/frontend1/priority": "1", - "/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com", - } - - frontend2 := map[string]string{ - "/traefik/frontends/frontend2/backend": "backend1", - "/traefik/frontends/frontend2/entrypoints": "https", - "/traefik/frontends/frontend2/priority": "10", - "/traefik/frontends/frontend2/routes/test_2/rule": "Host:snitest.org", - } - - tlsconfigure1 := map[string]string{ - "/traefik/tls/snitestcom/entrypoints": "https", - "/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey), - "/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert), - } - - tlsconfigure2 := map[string]string{ - "/traefik/tls/snitestorg/entrypoints": "https", - "/traefik/tls/snitestorg/certificate/keyfile": string(snitestOrgKey), - 
"/traefik/tls/snitestorg/certificate/certfile": string(snitestOrgCert), - } - - // config backends,frontends and first tls keypair - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range backend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range tlsconfigure1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - tr1 := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.com", - }, - } - - tr2 := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.org", - }, - } - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil) - return err - }) - c.Assert(err, checker.IsNil) - - err = cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr1.TLSClientConfig.ServerName - req.Header.Set("Host", tr1.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr1, try.HasCn(tr1.TLSClientConfig.ServerName)) - c.Assert(err, checker.IsNil) - - // now we configure the second keypair in etcd and the request for host "snitest.org" will use the second keypair - - for key, value := range tlsconfigure2 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Get("/traefik/tls/snitestorg/certificate/keyfile", nil) - return err - }) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr2.TLSClientConfig.ServerName - req.Header.Set("Host", tr2.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr2, try.HasCn(tr2.TLSClientConfig.ServerName)) - c.Assert(err, checker.IsNil) -} - -func (s *Etcd3Suite) TestDeleteSNIDynamicTlsConfig(c *check.C) { - // start Traefik - cmd, display := s.traefikCmd( - withConfigFile("fixtures/etcd/simple_https.toml"), - "--etcd", - "--etcd.endpoint="+ipEtcd+":4001") - defer display(c) - - // prepare to config - snitestComCert, err := ioutil.ReadFile("fixtures/https/snitest.com.cert") - c.Assert(err, checker.IsNil) - snitestComKey, err := ioutil.ReadFile("fixtures/https/snitest.com.key") - c.Assert(err, checker.IsNil) - - backend1 := map[string]string{ - "/traefik/backends/backend1/circuitbreaker/expression": "NetworkErrorRatio() > 0.5", - "/traefik/backends/backend1/servers/server1/url": "http://" + ipWhoami01 + ":80", - "/traefik/backends/backend1/servers/server1/weight": "1", - "/traefik/backends/backend1/servers/server2/url": "http://" + ipWhoami02 + ":80", - "/traefik/backends/backend1/servers/server2/weight": "1", - } - - frontend1 := map[string]string{ - "/traefik/frontends/frontend1/backend": "backend1", - "/traefik/frontends/frontend1/entrypoints": "https", - "/traefik/frontends/frontend1/priority": "1", - 
"/traefik/frontends/frontend1/routes/test_1/rule": "Host:snitest.com", - } - - tlsconfigure1 := map[string]string{ - "/traefik/tls/snitestcom/entrypoints": "https", - "/traefik/tls/snitestcom/certificate/keyfile": string(snitestComKey), - "/traefik/tls/snitestcom/certificate/certfile": string(snitestComCert), - } - - // config backends,frontends and first tls keypair - for key, value := range backend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range frontend1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - for key, value := range tlsconfigure1 { - err := s.kv.Put(key, []byte(value), nil) - c.Assert(err, checker.IsNil) - } - - tr1 := &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - ServerName: "snitest.com", - }, - } - - // wait for etcd - err = try.Do(60*time.Second, func() error { - _, err := s.kv.Get("/traefik/tls/snitestcom/certificate/keyfile", nil) - return err - }) - c.Assert(err, checker.IsNil) - - err = cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - req, err := http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr1.TLSClientConfig.ServerName - req.Header.Set("Host", tr1.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr1, try.HasCn(tr1.TLSClientConfig.ServerName)) - c.Assert(err, checker.IsNil) - - // now we delete the tls cert/key pairs,so the endpoint show use default cert/key pair - for key := range tlsconfigure1 { - err := s.kv.Delete(key) - c.Assert(err, checker.IsNil) - } - - req, err = http.NewRequest(http.MethodGet, "https://127.0.0.1:4443/", nil) - c.Assert(err, checker.IsNil) - req.Host = tr1.TLSClientConfig.ServerName - req.Header.Set("Host", tr1.TLSClientConfig.ServerName) - req.Header.Set("Accept", "*/*") - - err = try.RequestWithTransport(req, 30*time.Second, tr1, try.HasCn("TRAEFIK DEFAULT CERT")) - c.Assert(err, checker.IsNil) -} diff --git a/integration/eureka_test.go b/integration/eureka_test.go deleted file mode 100644 index eb1be4994..000000000 --- a/integration/eureka_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package integration - -import ( - "bytes" - "net/http" - "os" - "strings" - "text/template" - "time" - - "github.com/containous/traefik/integration/try" - "github.com/go-check/check" - - checker "github.com/vdemeester/shakers" -) - -// Eureka test suites (using libcompose) -type EurekaSuite struct { - BaseSuite - eurekaIP string - eurekaURL string -} - -func (s *EurekaSuite) SetUpSuite(c *check.C) { - s.createComposeProject(c, "eureka") - s.composeProject.Start(c) - - eureka := s.composeProject.Container(c, "eureka") - s.eurekaIP = eureka.NetworkSettings.IPAddress - s.eurekaURL = "http://" + s.eurekaIP + ":8761/eureka/apps" - - // wait for eureka - err := try.GetRequest(s.eurekaURL, 60*time.Second) - c.Assert(err, checker.IsNil) -} - -func (s *EurekaSuite) TestSimpleConfiguration(c *check.C) { - - whoami1Host := s.composeProject.Container(c, "whoami1").NetworkSettings.IPAddress - - file := s.adaptFile(c, "fixtures/eureka/simple.toml", struct{ EurekaHost string }{s.eurekaIP}) - defer os.Remove(file) - cmd, display := s.traefikCmd(withConfigFile(file)) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - eurekaTemplate := ` - { - "instance": { - "hostName": "{{ .IP }}", - "app": "{{ .ID }}", - "ipAddr": "{{ .IP }}", - "status": "UP", - 
"port": { - "$": {{ .Port }}, - "@enabled": "true" - }, - "dataCenterInfo": { - "name": "MyOwn" - } - } - }` - - tmpl, err := template.New("eurekaTemplate").Parse(eurekaTemplate) - c.Assert(err, checker.IsNil) - buf := new(bytes.Buffer) - templateVars := map[string]string{ - "ID": "tests-integration-traefik", - "IP": whoami1Host, - "Port": "80", - } - // add in eureka - err = tmpl.Execute(buf, templateVars) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest(http.MethodPost, s.eurekaURL+"/tests-integration-traefik", strings.NewReader(buf.String())) - c.Assert(err, checker.IsNil) - req.Header.Set("Content-Type", "application/json") - - err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusNoContent)) - c.Assert(err, checker.IsNil) - - // wait for traefik - err = try.GetRequest("http://127.0.0.1:8080/api/providers", 60*time.Second, try.BodyContains("Host:tests-integration-traefik")) - c.Assert(err, checker.IsNil) - - req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil) - c.Assert(err, checker.IsNil) - req.Host = "tests-integration-traefik" - - err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK)) - c.Assert(err, checker.IsNil) - - // TODO validate : run on 80 - // Expected a 404 as we did not configure anything - err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} diff --git a/integration/fixtures/consul/simple.toml b/integration/fixtures/consul/simple.toml deleted file mode 100644 index 5a63d1f60..000000000 --- a/integration/fixtures/consul/simple.toml +++ /dev/null @@ -1,18 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.web] - address = ":8000" - [entrypoints.api] - address = ":8081" - - -[providers] - [providers.consul] - endpoint = "{{.ConsulHost}}:8500" - watch = true - prefix = "traefik" - -[api] - entryPoint = "api" diff --git a/integration/fixtures/consul/simple_https.toml b/integration/fixtures/consul/simple_https.toml deleted file mode 100644 index ad74608de..000000000 --- a/integration/fixtures/consul/simple_https.toml +++ /dev/null @@ -1,20 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.api] - address = ":8081" - [entrypoints.web] - address = ":8000" - [entrypoints.web-secure] - address = ":4443" - [entrypoints.web-secure.tls] - -[providers] - [providers.consul] - endpoint = "{{.ConsulHost}}:8500" - prefix = "traefik" - watch = true - -[api] - entryPoint = "api" diff --git a/integration/fixtures/consul_catalog/simple.toml b/integration/fixtures/consul_catalog/simple.toml deleted file mode 100644 index 11232c110..000000000 --- a/integration/fixtures/consul_catalog/simple.toml +++ /dev/null @@ -1,13 +0,0 @@ -[log] -logLevel = "DEBUG" - -[api] - -[entrypoints] - [entrypoints.web] - address = ":8000" - -[providers] - [providers.consulCatalog] - domain = "consul.localhost" - frontEndRule = "Host(`{{.ServiceName}}.{{.Domain}}`)" diff --git a/integration/fixtures/dynamodb/simple.toml b/integration/fixtures/dynamodb/simple.toml deleted file mode 100644 index 25f650ecc..000000000 --- a/integration/fixtures/dynamodb/simple.toml +++ /dev/null @@ -1,18 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.web] - address = ":8080" - [entrypoints.api] - address = ":8081" - -[providers] - [providers.dynamodb] - accessKeyID = "key" - secretAccessKey = "secret" - endpoint = "{{.DynamoURL}}" - region = "us-east-1" - -[api] - entryPoint = "api" diff --git 
a/integration/fixtures/etcd/simple.toml b/integration/fixtures/etcd/simple.toml deleted file mode 100644 index 12245c214..000000000 --- a/integration/fixtures/etcd/simple.toml +++ /dev/null @@ -1,18 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.web] - address = ":8000" - [entrypoints.api] - address = ":8081" - - -[providers] - [providers.etcd] - endpoint = "{{.EtcdHost}}:2379" - prefix = "/traefik" - watch = true - -[api] - entryPoint = "api" diff --git a/integration/fixtures/etcd/simple_https.toml b/integration/fixtures/etcd/simple_https.toml deleted file mode 100644 index d0063fd71..000000000 --- a/integration/fixtures/etcd/simple_https.toml +++ /dev/null @@ -1,22 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.api] - address = ":8081" - [entrypoints.web] - address = ":8000" - [entrypoints.web-secure] - address = ":4443" - [entrypoints.web-secure.tls] - - - -#[etcd] -# endpoint = "{{.EtcdHost}}:2379" -# prefix = "/traefik" -# watch = true - - -[api] - entryPoint = "api" \ No newline at end of file diff --git a/integration/fixtures/eureka/simple.toml b/integration/fixtures/eureka/simple.toml deleted file mode 100644 index f672c18ae..000000000 --- a/integration/fixtures/eureka/simple.toml +++ /dev/null @@ -1,13 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.web] - address = ":8000" - -[providers] - [providers.eureka] - endpoint = "http://{{.EurekaHost}}:8761/eureka" - delay = "1s" - -[api] diff --git a/integration/fixtures/mesos/simple.toml b/integration/fixtures/mesos/simple.toml deleted file mode 100644 index d66058b32..000000000 --- a/integration/fixtures/mesos/simple.toml +++ /dev/null @@ -1,9 +0,0 @@ -[log] -logLevel = "DEBUG" - -[entrypoints] - [entrypoints.web] - address = ":8000" - -[providers] - [providers.mesos] diff --git a/integration/https_test.go b/integration/https_test.go index 2e737847e..6ea6041c6 100644 --- a/integration/https_test.go +++ b/integration/https_test.go @@ -12,7 +12,7 @@ import ( "github.com/BurntSushi/toml" "github.com/containous/traefik/integration/try" - "github.com/containous/traefik/old/types" + "github.com/containous/traefik/pkg/config" traefiktls "github.com/containous/traefik/pkg/tls" "github.com/go-check/check" checker "github.com/vdemeester/shakers" @@ -714,7 +714,7 @@ func modifyCertificateConfFileContent(c *check.C, certFileName, confFileName, en // If certificate file is not provided, just truncate the configuration file if len(certFileName) > 0 { - tlsConf := types.Configuration{ + tlsConf := config.Configuration{ TLS: []*traefiktls.Configuration{{ Certificate: &traefiktls.Certificate{ CertFile: traefiktls.FileOrContent("fixtures/https/" + certFileName + ".cert"), diff --git a/integration/integration_test.go b/integration/integration_test.go index fbccf70b7..4ec4c80c2 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -37,18 +37,6 @@ func init() { if *container { // tests launched from a container - - // FIXME Provider tests - // check.Suite(&ConsulCatalogSuite{}) - // check.Suite(&ConsulSuite{}) - // check.Suite(&DynamoDBSuite{}) - // check.Suite(&EurekaSuite{}) - // check.Suite(&MesosSuite{}) - - // FIXME use consulcatalog - // check.Suite(&ConstraintSuite{}) - - // FIXME e2e tests check.Suite(&AccessLogSuite{}) check.Suite(&AcmeSuite{}) check.Suite(&DockerComposeSuite{}) @@ -60,8 +48,8 @@ func init() { check.Suite(&HostResolverSuite{}) check.Suite(&HTTPSSuite{}) check.Suite(&LogRotationSuite{}) - // check.Suite(&MarathonSuite{}) - // 
check.Suite(&MarathonSuite15{}) + check.Suite(&MarathonSuite{}) + check.Suite(&MarathonSuite15{}) check.Suite(&RateLimitSuite{}) check.Suite(&RestSuite{}) check.Suite(&RetrySuite{}) @@ -76,8 +64,6 @@ func init() { check.Suite(&K8sSuite{}) check.Suite(&ProxyProtocolSuite{}) check.Suite(&TCPSuite{}) - // FIXME Provider tests - // check.Suite(&Etcd3Suite{}) } } diff --git a/integration/mesos_test.go b/integration/mesos_test.go deleted file mode 100644 index 12f5eefb1..000000000 --- a/integration/mesos_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package integration - -import ( - "net/http" - "time" - - "github.com/containous/traefik/integration/try" - "github.com/go-check/check" - checker "github.com/vdemeester/shakers" -) - -// Mesos test suites (using libcompose) -type MesosSuite struct{ BaseSuite } - -func (s *MesosSuite) SetUpSuite(c *check.C) { - s.createComposeProject(c, "mesos") -} - -func (s *MesosSuite) TestSimpleConfiguration(c *check.C) { - cmd, display := s.traefikCmd(withConfigFile("fixtures/mesos/simple.toml")) - defer display(c) - err := cmd.Start() - c.Assert(err, checker.IsNil) - defer cmd.Process.Kill() - - // TODO validate : run on 80 - // Expected a 404 as we did not configure anything - err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) - c.Assert(err, checker.IsNil) -} diff --git a/integration/resources/compose/constraints.yml b/integration/resources/compose/constraints.yml deleted file mode 100644 index 88359096c..000000000 --- a/integration/resources/compose/constraints.yml +++ /dev/null @@ -1,17 +0,0 @@ -consul: - image: progrium/consul - command: -server -bootstrap -log-level debug -ui-dir /ui - ports: - - "8400:8400" - - "8500:8500" - - "8600:53/udp" - expose: - - "8300" - - "8301" - - "8301/udp" - - "8302" - - "8302/udp" -whoami: - image: containous/whoami - ports: - - "8881:80" diff --git a/integration/resources/compose/consul.yml b/integration/resources/compose/consul.yml deleted file mode 100644 index e856f0560..000000000 --- a/integration/resources/compose/consul.yml +++ /dev/null @@ -1,25 +0,0 @@ -consul: - image: progrium/consul - command: -server -bootstrap -log-level debug -ui-dir /ui - ports: - - "8400:8400" - - "8500:8500" - - "8600:53/udp" - expose: - - "8300" - - "8301" - - "8301/udp" - - "8302" - - "8302/udp" - -whoami1: - image: containous/whoami - -whoami2: - image: containous/whoami - -whoami3: - image: containous/whoami - -whoami4: - image: containous/whoami diff --git a/integration/resources/compose/consul_catalog.yml b/integration/resources/compose/consul_catalog.yml deleted file mode 100644 index d9ac867d1..000000000 --- a/integration/resources/compose/consul_catalog.yml +++ /dev/null @@ -1,22 +0,0 @@ -consul: - # use v1.4.0 because https://github.com/hashicorp/consul/issues/5270 - # v1.4.1 cannot be used. 
- # waiting for v1.4.2 - image: consul:1.4.0 - command: agent -server -bootstrap-expect 1 -client 0.0.0.0 -log-level debug -ui - ports: - - "8400:8400" - - "8500:8500" - - "8600:53/udp" - expose: - - "8300" - - "8301" - - "8301/udp" - - "8302" - - "8302/udp" -whoami1: - image: containous/whoami -whoami2: - image: containous/whoami -whoami3: - image: containous/whoami diff --git a/integration/resources/compose/consul_tls.yml b/integration/resources/compose/consul_tls.yml deleted file mode 100644 index 3cd3f31c8..000000000 --- a/integration/resources/compose/consul_tls.yml +++ /dev/null @@ -1,14 +0,0 @@ -consul: - image: progrium/consul - command: -server -bootstrap -log-level debug -ui-dir /ui -config-dir /configs - ports: - - "8500:8500" - - "8585:8585" - expose: - - "8300" - - "8301" - - "8301/udp" - - "8302" - - "8302/udp" - volumes: - - ../tls:/configs diff --git a/integration/resources/compose/dynamodb.yml b/integration/resources/compose/dynamodb.yml deleted file mode 100644 index e2bc3c1d7..000000000 --- a/integration/resources/compose/dynamodb.yml +++ /dev/null @@ -1,16 +0,0 @@ -dynamo: - image: deangiberson/aws-dynamodb-local - command: -sharedDb - ports: - - "8000:8000" - expose: - - "8000" - -whoami1: - image: containous/whoami - -whoami2: - image: containous/whoami - -whoami3: - image: containous/whoami diff --git a/integration/resources/compose/etcd3.yml b/integration/resources/compose/etcd3.yml deleted file mode 100644 index 3b8829111..000000000 --- a/integration/resources/compose/etcd3.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: '2' - -services: - - etcd: - image: quay.io/coreos/etcd:v3.2.9 - command: /usr/local/bin/etcd --data-dir=/etcd-data --name node1 --initial-advertise-peer-urls http://etcd:2380 --listen-peer-urls http://0.0.0.0:2380 --advertise-client-urls http://etcd:2379,http://etcd:4001 --listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 --initial-cluster node1=http://etcd:2380 --debug - expose: - - 2380 - - 2379 - - 4001 - - 7001 - - whoami1: - image: containous/whoami - depends_on: - - etcd - - whoami2: - image: containous/whoami - depends_on: - - whoami1 - - whoami3: - image: containous/whoami - depends_on: - - whoami2 - - whoami4: - image: containous/whoami - depends_on: - - whoami3 - diff --git a/integration/resources/compose/eureka.yml b/integration/resources/compose/eureka.yml deleted file mode 100644 index ef56f85d4..000000000 --- a/integration/resources/compose/eureka.yml +++ /dev/null @@ -1,5 +0,0 @@ -eureka: - image: springcloud/eureka - -whoami1: - image: containous/whoami diff --git a/integration/resources/compose/mesos.yml b/integration/resources/compose/mesos.yml deleted file mode 100644 index 14181143e..000000000 --- a/integration/resources/compose/mesos.yml +++ /dev/null @@ -1,34 +0,0 @@ -zk: - image: bobrik/zookeeper - net: host - environment: - ZK_CONFIG: tickTime=2000,initLimit=10,syncLimit=5,maxClientCnxns=128,forceSync=no,clientPort=2181 - ZK_ID: " 1" - -master: - image: mesosphere/mesos-master:0.28.1-2.0.20.ubuntu1404 - net: host - environment: - MESOS_ZK: zk://127.0.0.1:2181/mesos - MESOS_HOSTNAME: 127.0.0.1 - MESOS_IP: 127.0.0.1 - MESOS_QUORUM: " 1" - MESOS_CLUSTER: docker-compose - MESOS_WORK_DIR: /var/lib/mesos - -slave: - image: mesosphere/mesos-slave:0.28.1-2.0.20.ubuntu1404 - net: host - pid: host - privileged: true - environment: - MESOS_MASTER: zk://127.0.0.1:2181/mesos - MESOS_HOSTNAME: 127.0.0.1 - MESOS_IP: 127.0.0.1 - MESOS_CONTAINERIZERS: docker,mesos - volumes: - - /sys/fs/cgroup:/sys/fs/cgroup - - 
/usr/bin/docker:/usr/bin/docker:ro - - /usr/lib/x86_64-linux-gnu/libapparmor.so.1:/usr/lib/x86_64-linux-gnu/libapparmor.so.1:ro - - /var/run/docker.sock:/var/run/docker.sock - - /lib/x86_64-linux-gnu/libsystemd-journal.so.0:/lib/x86_64-linux-gnu/libsystemd-journal.so.0 diff --git a/old/acme/account.go b/old/acme/account.go deleted file mode 100644 index 7525bd655..000000000 --- a/old/acme/account.go +++ /dev/null @@ -1,335 +0,0 @@ -package acme - -import ( - "context" - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/tls" - "crypto/x509" - "fmt" - "reflect" - "regexp" - "sort" - "strings" - "sync" - "time" - - "github.com/containous/traefik/pkg/log" - acmeprovider "github.com/containous/traefik/pkg/provider/acme" - "github.com/containous/traefik/pkg/types" - "github.com/go-acme/lego/certcrypto" - "github.com/go-acme/lego/registration" -) - -// Account is used to store lets encrypt registration info -type Account struct { - Email string - Registration *registration.Resource - PrivateKey []byte - KeyType certcrypto.KeyType - DomainsCertificate DomainsCertificates - ChallengeCerts map[string]*ChallengeCert - HTTPChallenge map[string]map[string][]byte -} - -// ChallengeCert stores a challenge certificate -type ChallengeCert struct { - Certificate []byte - PrivateKey []byte - certificate *tls.Certificate -} - -// Init account struct -func (a *Account) Init() error { - err := a.DomainsCertificate.Init() - if err != nil { - return err - } - - err = a.RemoveAccountV1Values() - if err != nil { - log.Errorf("Unable to remove ACME Account V1 values during account initialization: %v", err) - } - - for _, cert := range a.ChallengeCerts { - if cert.certificate == nil { - certificate, err := tls.X509KeyPair(cert.Certificate, cert.PrivateKey) - if err != nil { - return err - } - cert.certificate = &certificate - } - - if cert.certificate.Leaf == nil { - leaf, err := x509.ParseCertificate(cert.certificate.Certificate[0]) - if err != nil { - return err - } - cert.certificate.Leaf = leaf - } - } - return nil -} - -// NewAccount creates an account -func NewAccount(email string, certs []*DomainsCertificate, keyTypeValue string) (*Account, error) { - keyType := acmeprovider.GetKeyType(context.Background(), keyTypeValue) - - // Create a user. 
New accounts need an email and private key to start - privateKey, err := rsa.GenerateKey(rand.Reader, 4096) - if err != nil { - return nil, err - } - - domainsCerts := DomainsCertificates{Certs: certs} - err = domainsCerts.Init() - if err != nil { - return nil, err - } - - return &Account{ - Email: email, - PrivateKey: x509.MarshalPKCS1PrivateKey(privateKey), - KeyType: keyType, - DomainsCertificate: DomainsCertificates{Certs: domainsCerts.Certs}, - ChallengeCerts: map[string]*ChallengeCert{}}, nil -} - -// GetEmail returns email -func (a *Account) GetEmail() string { - return a.Email -} - -// GetRegistration returns lets encrypt registration resource -func (a *Account) GetRegistration() *registration.Resource { - return a.Registration -} - -// GetPrivateKey returns private key -func (a *Account) GetPrivateKey() crypto.PrivateKey { - if privateKey, err := x509.ParsePKCS1PrivateKey(a.PrivateKey); err == nil { - return privateKey - } - - log.Errorf("Cannot unmarshall private key %+v", a.PrivateKey) - return nil -} - -// RemoveAccountV1Values removes ACME account V1 values -func (a *Account) RemoveAccountV1Values() error { - // Check if ACME Account is in ACME V1 format - if a.Registration != nil { - isOldRegistration, err := regexp.MatchString(acmeprovider.RegistrationURLPathV1Regexp, a.Registration.URI) - if err != nil { - return err - } - - if isOldRegistration { - a.reset() - } - } - return nil -} - -func (a *Account) reset() { - log.Debug("Reset ACME account object.") - a.Email = "" - a.Registration = nil - a.PrivateKey = nil -} - -// Certificate is used to store certificate info -type Certificate struct { - Domain string - CertURL string - CertStableURL string - PrivateKey []byte - Certificate []byte -} - -// DomainsCertificates stores a certificate for multiple domains -type DomainsCertificates struct { - Certs []*DomainsCertificate - lock sync.RWMutex -} - -func (dc *DomainsCertificates) Len() int { - return len(dc.Certs) -} - -func (dc *DomainsCertificates) Swap(i, j int) { - dc.Certs[i], dc.Certs[j] = dc.Certs[j], dc.Certs[i] -} - -func (dc *DomainsCertificates) Less(i, j int) bool { - if reflect.DeepEqual(dc.Certs[i].Domains, dc.Certs[j].Domains) { - return dc.Certs[i].tlsCert.Leaf.NotAfter.After(dc.Certs[j].tlsCert.Leaf.NotAfter) - } - - if dc.Certs[i].Domains.Main == dc.Certs[j].Domains.Main { - return strings.Join(dc.Certs[i].Domains.SANs, ",") < strings.Join(dc.Certs[j].Domains.SANs, ",") - } - - return dc.Certs[i].Domains.Main < dc.Certs[j].Domains.Main -} - -func (dc *DomainsCertificates) removeDuplicates() { - sort.Sort(dc) - for i := 0; i < len(dc.Certs); i++ { - for i2 := i + 1; i2 < len(dc.Certs); i2++ { - if reflect.DeepEqual(dc.Certs[i].Domains, dc.Certs[i2].Domains) { - // delete - log.Warnf("Remove duplicate cert: %+v, expiration :%s", dc.Certs[i2].Domains, dc.Certs[i2].tlsCert.Leaf.NotAfter.String()) - dc.Certs = append(dc.Certs[:i2], dc.Certs[i2+1:]...) 
- i2-- - } - } - } -} - -func (dc *DomainsCertificates) removeEmpty() { - var certs []*DomainsCertificate - for _, cert := range dc.Certs { - if cert.Certificate != nil && len(cert.Certificate.Certificate) > 0 && len(cert.Certificate.PrivateKey) > 0 { - certs = append(certs, cert) - } - } - dc.Certs = certs -} - -// Init DomainsCertificates -func (dc *DomainsCertificates) Init() error { - dc.lock.Lock() - defer dc.lock.Unlock() - - dc.removeEmpty() - - for _, domainsCertificate := range dc.Certs { - tlsCert, err := tls.X509KeyPair(domainsCertificate.Certificate.Certificate, domainsCertificate.Certificate.PrivateKey) - if err != nil { - return err - } - - domainsCertificate.tlsCert = &tlsCert - - if domainsCertificate.tlsCert.Leaf == nil { - leaf, err := x509.ParseCertificate(domainsCertificate.tlsCert.Certificate[0]) - if err != nil { - return err - } - - domainsCertificate.tlsCert.Leaf = leaf - } - } - - dc.removeDuplicates() - return nil -} - -func (dc *DomainsCertificates) renewCertificates(acmeCert *Certificate, domain types.Domain) error { - dc.lock.Lock() - defer dc.lock.Unlock() - - for _, domainsCertificate := range dc.Certs { - if reflect.DeepEqual(domain, domainsCertificate.Domains) { - tlsCert, err := tls.X509KeyPair(acmeCert.Certificate, acmeCert.PrivateKey) - if err != nil { - return err - } - - domainsCertificate.Certificate = acmeCert - domainsCertificate.tlsCert = &tlsCert - return nil - } - } - - return fmt.Errorf("certificate to renew not found for domain %s", domain.Main) -} - -func (dc *DomainsCertificates) addCertificateForDomains(acmeCert *Certificate, domain types.Domain) (*DomainsCertificate, error) { - dc.lock.Lock() - defer dc.lock.Unlock() - - tlsCert, err := tls.X509KeyPair(acmeCert.Certificate, acmeCert.PrivateKey) - if err != nil { - return nil, err - } - - cert := DomainsCertificate{Domains: domain, Certificate: acmeCert, tlsCert: &tlsCert} - dc.Certs = append(dc.Certs, &cert) - return &cert, nil -} - -func (dc *DomainsCertificates) getCertificateForDomain(domainToFind string) (*DomainsCertificate, bool) { - dc.lock.RLock() - defer dc.lock.RUnlock() - - for _, domainsCertificate := range dc.Certs { - for _, domain := range domainsCertificate.Domains.ToStrArray() { - if strings.HasPrefix(domain, "*.") && types.MatchDomain(domainToFind, domain) { - return domainsCertificate, true - } - if domain == domainToFind { - return domainsCertificate, true - } - } - } - return nil, false -} - -func (dc *DomainsCertificates) exists(domainToFind types.Domain) (*DomainsCertificate, bool) { - dc.lock.RLock() - defer dc.lock.RUnlock() - - for _, domainsCertificate := range dc.Certs { - if reflect.DeepEqual(domainToFind, domainsCertificate.Domains) { - return domainsCertificate, true - } - } - return nil, false -} - -func (dc *DomainsCertificates) toDomainsMap() map[string]*tls.Certificate { - domainsCertificatesMap := make(map[string]*tls.Certificate) - - for _, domainCertificate := range dc.Certs { - certKey := domainCertificate.Domains.Main - - if domainCertificate.Domains.SANs != nil { - sort.Strings(domainCertificate.Domains.SANs) - - for _, dnsName := range domainCertificate.Domains.SANs { - if dnsName != domainCertificate.Domains.Main { - certKey += fmt.Sprintf(",%s", dnsName) - } - } - } - domainsCertificatesMap[certKey] = domainCertificate.tlsCert - } - return domainsCertificatesMap -} - -// DomainsCertificate contains a certificate for multiple domains -type DomainsCertificate struct { - Domains types.Domain - Certificate *Certificate - tlsCert *tls.Certificate -} - 
-func (dc *DomainsCertificate) needRenew() bool { - for _, c := range dc.tlsCert.Certificate { - crt, err := x509.ParseCertificate(c) - if err != nil { - // If there's an error, we assume the cert is broken, and needs update - return true - } - - // <= 30 days left, renew certificate - if crt.NotAfter.Before(time.Now().Add(24 * 30 * time.Hour)) { - return true - } - } - - return false -} diff --git a/old/acme/acme.go b/old/acme/acme.go deleted file mode 100644 index d710cf48c..000000000 --- a/old/acme/acme.go +++ /dev/null @@ -1,840 +0,0 @@ -package acme - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - fmtlog "log" - "net" - "net/http" - "net/url" - "reflect" - "strings" - "sync" - "time" - - "github.com/BurntSushi/ty/fun" - "github.com/cenkalti/backoff" - "github.com/containous/mux" - "github.com/containous/staert" - "github.com/containous/traefik/old/cluster" - "github.com/containous/traefik/pkg/log" - acmeprovider "github.com/containous/traefik/pkg/provider/acme" - "github.com/containous/traefik/pkg/safe" - "github.com/containous/traefik/pkg/types" - "github.com/containous/traefik/pkg/version" - "github.com/eapache/channels" - "github.com/go-acme/lego/certificate" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/challenge/dns01" - "github.com/go-acme/lego/challenge/http01" - "github.com/go-acme/lego/lego" - legolog "github.com/go-acme/lego/log" - "github.com/go-acme/lego/providers/dns" - "github.com/go-acme/lego/registration" -) - -var ( - // OSCPMustStaple enables OSCP stapling as from https://github.com/go-acme/lego/issues/270 - OSCPMustStaple = false -) - -// ACME allows to connect to lets encrypt and retrieve certs -// Deprecated Please use provider/acme/Provider -type ACME struct { - Email string `description:"Email address used for registration"` - Domains []types.Domain `description:"SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='main.net,san1.net,san2.net'"` - Storage string `description:"File or key used for certificates storage."` - OnDemand bool `description:"(Deprecated) Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated - OnHostRule bool `description:"Enable certificate generation on frontends Host rules."` - CAServer string `description:"CA server to use."` - EntryPoint string `description:"Entrypoint to proxy acme challenge to."` - KeyType string `description:"KeyType used for generating certificate private key. Allow value 'EC256', 'EC384', 'RSA2048', 'RSA4096', 'RSA8192'. 
Default to 'RSA4096'"` - DNSChallenge *acmeprovider.DNSChallenge `description:"Activate DNS-01 Challenge"` - HTTPChallenge *acmeprovider.HTTPChallenge `description:"Activate HTTP-01 Challenge"` - TLSChallenge *acmeprovider.TLSChallenge `description:"Activate TLS-ALPN-01 Challenge"` - ACMELogging bool `description:"Enable debug logging of ACME actions."` - OverrideCertificates bool `description:"Enable to override certificates in key-value store when using storeconfig"` - client *lego.Client - store cluster.Store - challengeHTTPProvider *challengeHTTPProvider - challengeTLSProvider *challengeTLSProvider - checkOnDemandDomain func(domain string) bool - jobs *channels.InfiniteChannel - TLSConfig *tls.Config `description:"TLS config in case wildcard certs are used"` - dynamicCerts *safe.Safe - resolvingDomains map[string]struct{} - resolvingDomainsMutex sync.RWMutex -} - -func (a *ACME) init() error { - if a.ACMELogging { - legolog.Logger = log.WithoutContext() - } else { - legolog.Logger = fmtlog.New(ioutil.Discard, "", 0) - } - - a.jobs = channels.NewInfiniteChannel() - - // Init the currently resolved domain map - a.resolvingDomains = make(map[string]struct{}) - - return nil -} - -// AddRoutes add routes on internal router -func (a *ACME) AddRoutes(router *mux.Router) { - router.Methods(http.MethodGet). - Path(http01.ChallengePath("{token}")). - Handler(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if a.challengeHTTPProvider == nil { - rw.WriteHeader(http.StatusNotFound) - return - } - - vars := mux.Vars(req) - if token, ok := vars["token"]; ok { - domain, _, err := net.SplitHostPort(req.Host) - if err != nil { - log.Debugf("Unable to split host and port: %v. Fallback to request host.", err) - domain = req.Host - } - tokenValue := a.challengeHTTPProvider.getTokenValue(token, domain) - if len(tokenValue) > 0 { - _, err := rw.Write(tokenValue) - if err != nil { - http.Error(rw, err.Error(), http.StatusInternalServerError) - } - return - } - } - rw.WriteHeader(http.StatusNotFound) - })) -} - -// CreateClusterConfig creates a tls.config using ACME configuration in cluster mode -func (a *ACME) CreateClusterConfig(leadership *cluster.Leadership, tlsConfig *tls.Config, certs *safe.Safe, checkOnDemandDomain func(domain string) bool) error { - err := a.init() - if err != nil { - return err - } - - if len(a.Storage) == 0 { - return errors.New("empty Store, please provide a key for certs storage") - } - - a.checkOnDemandDomain = checkOnDemandDomain - a.dynamicCerts = certs - - tlsConfig.GetCertificate = a.getCertificate - a.TLSConfig = tlsConfig - - listener := func(object cluster.Object) error { - account := object.(*Account) - err := account.Init() - if err != nil { - return err - } - - if !leadership.IsLeader() { - a.client, err = a.buildACMEClient(account) - if err != nil { - log.Errorf("Error building ACME client %+v: %s", object, err.Error()) - } - } - return nil - } - - datastore, err := cluster.NewDataStore( - leadership.Pool.Ctx(), - staert.KvSource{ - Store: leadership.Store, - Prefix: a.Storage, - }, - &Account{}, - listener) - if err != nil { - return err - } - - a.store = datastore - a.challengeTLSProvider = &challengeTLSProvider{store: a.store} - - ticker := time.NewTicker(24 * time.Hour) - leadership.Pool.AddGoCtx(func(ctx context.Context) { - log.Info("Starting ACME renew job...") - defer log.Info("Stopped ACME renew job...") - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - a.renewCertificates() - } - } - }) - - 
leadership.AddListener(a.leadershipListener) - return nil -} - -func (a *ACME) leadershipListener(elected bool) error { - if elected { - _, err := a.store.Load() - if err != nil { - return err - } - - transaction, object, err := a.store.Begin() - if err != nil { - return err - } - - account := object.(*Account) - err = account.Init() - if err != nil { - return err - } - - // Reset Account values if caServer changed, thus registration URI can be updated - if account != nil && account.Registration != nil && !isAccountMatchingCaServer(account.Registration.URI, a.CAServer) { - log.Info("Account URI does not match the current CAServer. The account will be reset") - account.reset() - } - - var needRegister bool - if account == nil || len(account.Email) == 0 { - domainsCerts := DomainsCertificates{Certs: []*DomainsCertificate{}} - if account != nil { - domainsCerts = account.DomainsCertificate - } - - account, err = NewAccount(a.Email, domainsCerts.Certs, a.KeyType) - if err != nil { - return err - } - - needRegister = true - } else if len(account.KeyType) == 0 { - // Set the KeyType if not already defined in the account - account.KeyType = acmeprovider.GetKeyType(context.Background(), a.KeyType) - } - - a.client, err = a.buildACMEClient(account) - if err != nil { - return err - } - if needRegister { - // New users will need to register; be sure to save it - log.Debug("Register...") - - reg, err := a.client.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: true}) - if err != nil { - return err - } - - account.Registration = reg - } - - err = transaction.Commit(account) - if err != nil { - return err - } - - a.retrieveCertificates() - a.renewCertificates() - a.runJobs() - } - return nil -} - -func isAccountMatchingCaServer(accountURI string, serverURI string) bool { - aru, err := url.Parse(accountURI) - if err != nil { - log.Infof("Unable to parse account.Registration URL : %v", err) - return false - } - cau, err := url.Parse(serverURI) - if err != nil { - log.Infof("Unable to parse CAServer URL : %v", err) - return false - } - return cau.Hostname() == aru.Hostname() -} - -func (a *ACME) getCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - domain := types.CanonicalDomain(clientHello.ServerName) - account := a.store.Get().(*Account) - - if challengeCert, ok := a.challengeTLSProvider.getCertificate(domain); ok { - log.Debugf("ACME got challenge %s", domain) - return challengeCert, nil - } - - if providedCertificate := a.getProvidedCertificate(domain); providedCertificate != nil { - return providedCertificate, nil - } - - if domainCert, ok := account.DomainsCertificate.getCertificateForDomain(domain); ok { - log.Debugf("ACME got domain cert %s", domain) - return domainCert.tlsCert, nil - } - - if a.OnDemand { - if a.checkOnDemandDomain != nil && !a.checkOnDemandDomain(domain) { - return nil, nil - } - return a.loadCertificateOnDemand(clientHello) - } - - log.Debugf("No certificate found or generated for %s", domain) - return nil, nil -} - -func (a *ACME) retrieveCertificates() { - a.jobs.In() <- func() { - log.Info("Retrieving ACME certificates...") - - a.deleteUnnecessaryDomains() - - for i := 0; i < len(a.Domains); i++ { - domain := a.Domains[i] - - // check if cert isn't already loaded - account := a.store.Get().(*Account) - if _, exists := account.DomainsCertificate.exists(domain); !exists { - var domains []string - domains = append(domains, domain.Main) - domains = append(domains, domain.SANs...) 
- domains, err := a.getValidDomains(domains, true) - if err != nil { - log.Errorf("Error validating ACME certificate for domain %q: %s", domains, err) - continue - } - - certificateResource, err := a.getDomainsCertificates(domains) - if err != nil { - log.Errorf("Error getting ACME certificate for domain %q: %s", domains, err) - continue - } - - transaction, object, err := a.store.Begin() - if err != nil { - log.Errorf("Error creating ACME store transaction from domain %q: %s", domain, err) - continue - } - - account = object.(*Account) - _, err = account.DomainsCertificate.addCertificateForDomains(certificateResource, domain) - if err != nil { - log.Errorf("Error adding ACME certificate for domain %q: %s", domains, err) - continue - } - - if err = transaction.Commit(account); err != nil { - log.Errorf("Error Saving ACME account %+v: %s", account, err) - continue - } - } - } - - log.Info("Retrieved ACME certificates") - } -} - -func (a *ACME) renewCertificates() { - a.jobs.In() <- func() { - log.Info("Testing certificate renew...") - account := a.store.Get().(*Account) - for _, certificateResource := range account.DomainsCertificate.Certs { - if certificateResource.needRenew() { - log.Infof("Renewing certificate from LE : %+v", certificateResource.Domains) - renewedACMECert, err := a.renewACMECertificate(certificateResource) - if err != nil { - log.Errorf("Error renewing certificate from LE: %v", err) - continue - } - operation := func() error { - return a.storeRenewedCertificate(certificateResource, renewedACMECert) - } - notify := func(err error, time time.Duration) { - log.Warnf("Renewed certificate storage error: %v, retrying in %s", err, time) - } - ebo := backoff.NewExponentialBackOff() - ebo.MaxElapsedTime = 60 * time.Second - err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) - if err != nil { - log.Errorf("Datastore cannot sync: %v", err) - continue - } - } - } - } -} - -func (a *ACME) renewACMECertificate(certificateResource *DomainsCertificate) (*Certificate, error) { - renewedCert, err := a.client.Certificate.Renew(certificate.Resource{ - Domain: certificateResource.Certificate.Domain, - CertURL: certificateResource.Certificate.CertURL, - CertStableURL: certificateResource.Certificate.CertStableURL, - PrivateKey: certificateResource.Certificate.PrivateKey, - Certificate: certificateResource.Certificate.Certificate, - }, true, OSCPMustStaple) - if err != nil { - return nil, err - } - log.Infof("Renewed certificate from LE: %+v", certificateResource.Domains) - return &Certificate{ - Domain: renewedCert.Domain, - CertURL: renewedCert.CertURL, - CertStableURL: renewedCert.CertStableURL, - PrivateKey: renewedCert.PrivateKey, - Certificate: renewedCert.Certificate, - }, nil -} - -func (a *ACME) storeRenewedCertificate(certificateResource *DomainsCertificate, renewedACMECert *Certificate) error { - transaction, object, err := a.store.Begin() - if err != nil { - return fmt.Errorf("error during transaction initialization for renewing certificate: %v", err) - } - - log.Infof("Renewing certificate in data store : %+v ", certificateResource.Domains) - account := object.(*Account) - err = account.DomainsCertificate.renewCertificates(renewedACMECert, certificateResource.Domains) - if err != nil { - return fmt.Errorf("error renewing certificate in datastore: %v ", err) - } - - log.Infof("Commit certificate renewed in data store : %+v", certificateResource.Domains) - if err = transaction.Commit(account); err != nil { - return fmt.Errorf("error saving ACME account %+v: 
%v", account, err) - } - - oldAccount := a.store.Get().(*Account) - for _, oldCertificateResource := range oldAccount.DomainsCertificate.Certs { - if oldCertificateResource.Domains.Main == certificateResource.Domains.Main && strings.Join(oldCertificateResource.Domains.SANs, ",") == strings.Join(certificateResource.Domains.SANs, ",") && certificateResource.Certificate != renewedACMECert { - return fmt.Errorf("renewed certificate not stored: %+v", certificateResource.Domains) - } - } - - log.Infof("Certificate successfully renewed in data store: %+v", certificateResource.Domains) - return nil -} - -func (a *ACME) buildACMEClient(account *Account) (*lego.Client, error) { - log.Debug("Building ACME client...") - caServer := "https://acme-v02.api.letsencrypt.org/directory" - if len(a.CAServer) > 0 { - caServer = a.CAServer - } - - config := lego.NewConfig(account) - config.CADirURL = caServer - config.Certificate.KeyType = account.KeyType - config.UserAgent = fmt.Sprintf("containous-traefik/%s", version.Version) - - client, err := lego.NewClient(config) - if err != nil { - return nil, err - } - - // DNS challenge - if a.DNSChallenge != nil && len(a.DNSChallenge.Provider) > 0 { - log.Debugf("Using DNS Challenge provider: %s", a.DNSChallenge.Provider) - - var provider challenge.Provider - provider, err = dns.NewDNSChallengeProviderByName(a.DNSChallenge.Provider) - if err != nil { - return nil, err - } - - err = client.Challenge.SetDNS01Provider(provider, - dns01.CondOption(len(a.DNSChallenge.Resolvers) > 0, dns01.AddRecursiveNameservers(a.DNSChallenge.Resolvers)), - dns01.CondOption(a.DNSChallenge.DisablePropagationCheck || a.DNSChallenge.DelayBeforeCheck > 0, - dns01.AddPreCheck(func(_, _ string) (bool, error) { - if a.DNSChallenge.DelayBeforeCheck > 0 { - log.Debugf("Delaying %d rather than validating DNS propagation now.", a.DNSChallenge.DelayBeforeCheck) - time.Sleep(time.Duration(a.DNSChallenge.DelayBeforeCheck)) - } - return true, nil - })), - ) - return client, err - } - - // HTTP challenge - if a.HTTPChallenge != nil && len(a.HTTPChallenge.EntryPoint) > 0 { - log.Debug("Using HTTP Challenge provider.") - - a.challengeHTTPProvider = &challengeHTTPProvider{store: a.store} - err = client.Challenge.SetHTTP01Provider(a.challengeHTTPProvider) - return client, err - } - - // TLS Challenge - if a.TLSChallenge != nil { - log.Debug("Using TLS Challenge provider.") - - err = client.Challenge.SetTLSALPN01Provider(a.challengeTLSProvider) - return client, err - } - - return nil, errors.New("ACME challenge not specified, please select TLS or HTTP or DNS Challenge") -} - -func (a *ACME) loadCertificateOnDemand(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { - domain := types.CanonicalDomain(clientHello.ServerName) - account := a.store.Get().(*Account) - if certificateResource, ok := account.DomainsCertificate.getCertificateForDomain(domain); ok { - return certificateResource.tlsCert, nil - } - certificate, err := a.getDomainsCertificates([]string{domain}) - if err != nil { - return nil, err - } - log.Debugf("Got certificate on demand for domain %s", domain) - - transaction, object, err := a.store.Begin() - if err != nil { - return nil, err - } - account = object.(*Account) - cert, err := account.DomainsCertificate.addCertificateForDomains(certificate, types.Domain{Main: domain}) - if err != nil { - return nil, err - } - if err = transaction.Commit(account); err != nil { - return nil, err - } - return cert.tlsCert, nil -} - -// LoadCertificateForDomains loads certificates from ACME for given 
domains -func (a *ACME) LoadCertificateForDomains(domains []string) { - a.jobs.In() <- func() { - log.Debugf("LoadCertificateForDomains %v...", domains) - - domains, err := a.getValidDomains(domains, false) - if err != nil { - log.Errorf("Error getting valid domain: %v", err) - return - } - - operation := func() error { - if a.client == nil { - return errors.New("ACME client still not built") - } - return nil - } - notify := func(err error, time time.Duration) { - log.Errorf("Error getting ACME client: %v, retrying in %s", err, time) - } - ebo := backoff.NewExponentialBackOff() - ebo.MaxElapsedTime = 30 * time.Second - err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) - if err != nil { - log.Errorf("Error getting ACME client: %v", err) - return - } - account := a.store.Get().(*Account) - - // Check provided certificates - uncheckedDomains := a.getUncheckedDomains(domains, account) - if len(uncheckedDomains) == 0 { - return - } - - a.addResolvingDomains(uncheckedDomains) - defer a.removeResolvingDomains(uncheckedDomains) - - cert, err := a.getDomainsCertificates(uncheckedDomains) - if err != nil { - log.Errorf("Error getting ACME certificates %+v : %v", uncheckedDomains, err) - return - } - log.Debugf("Got certificate for domains %+v", uncheckedDomains) - transaction, object, err := a.store.Begin() - - if err != nil { - log.Errorf("Error creating transaction %+v : %v", uncheckedDomains, err) - return - } - var domain types.Domain - if len(uncheckedDomains) > 1 { - domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]} - } else { - domain = types.Domain{Main: uncheckedDomains[0]} - } - account = object.(*Account) - _, err = account.DomainsCertificate.addCertificateForDomains(cert, domain) - if err != nil { - log.Errorf("Error adding ACME certificates %+v : %v", uncheckedDomains, err) - return - } - if err = transaction.Commit(account); err != nil { - log.Errorf("Error Saving ACME account %+v: %v", account, err) - return - } - } -} - -func (a *ACME) addResolvingDomains(resolvingDomains []string) { - a.resolvingDomainsMutex.Lock() - defer a.resolvingDomainsMutex.Unlock() - - for _, domain := range resolvingDomains { - a.resolvingDomains[domain] = struct{}{} - } -} - -func (a *ACME) removeResolvingDomains(resolvingDomains []string) { - a.resolvingDomainsMutex.Lock() - defer a.resolvingDomainsMutex.Unlock() - - for _, domain := range resolvingDomains { - delete(a.resolvingDomains, domain) - } -} - -// Get provided certificate which check a domains list (Main and SANs) -// from static and dynamic provided certificates -func (a *ACME) getProvidedCertificate(domains string) *tls.Certificate { - log.Debugf("Looking for provided certificate to validate %s...", domains) - cert := searchProvidedCertificateForDomains(domains, a.TLSConfig.NameToCertificate) - if cert == nil && a.dynamicCerts != nil && a.dynamicCerts.Get() != nil { - cert = searchProvidedCertificateForDomains(domains, a.dynamicCerts.Get().(map[string]*tls.Certificate)) - } - if cert == nil { - log.Debugf("No provided certificate found for domains %s, get ACME certificate.", domains) - } - return cert -} - -func searchProvidedCertificateForDomains(domain string, certs map[string]*tls.Certificate) *tls.Certificate { - // Use regex to test for provided certs that might have been added into TLSOptions - for certDomains := range certs { - domainChecked := false - for _, certDomain := range strings.Split(certDomains, ",") { - domainChecked = types.MatchDomain(domain, certDomain) - if 
domainChecked { - break - } - } - if domainChecked { - log.Debugf("Domain %q checked by provided certificate %q", domain, certDomains) - return certs[certDomains] - } - } - return nil -} - -// Get provided certificate which check a domains list (Main and SANs) -// from static and dynamic provided certificates -func (a *ACME) getUncheckedDomains(domains []string, account *Account) []string { - a.resolvingDomainsMutex.RLock() - defer a.resolvingDomainsMutex.RUnlock() - - log.Debugf("Looking for provided certificate to validate %s...", domains) - allCerts := make(map[string]*tls.Certificate) - - // Get static certificates - for domains, certificate := range a.TLSConfig.NameToCertificate { - allCerts[domains] = certificate - } - - // Get dynamic certificates - if a.dynamicCerts != nil && a.dynamicCerts.Get() != nil { - for domains, certificate := range a.dynamicCerts.Get().(map[string]*tls.Certificate) { - allCerts[domains] = certificate - } - } - - // Get ACME certificates - if account != nil { - for domains, certificate := range account.DomainsCertificate.toDomainsMap() { - allCerts[domains] = certificate - } - } - - // Get currently resolved domains - for domain := range a.resolvingDomains { - if _, ok := allCerts[domain]; !ok { - allCerts[domain] = &tls.Certificate{} - } - } - - // Get Configuration Domains - for i := 0; i < len(a.Domains); i++ { - allCerts[a.Domains[i].Main] = &tls.Certificate{} - for _, san := range a.Domains[i].SANs { - allCerts[san] = &tls.Certificate{} - } - } - - return searchUncheckedDomains(domains, allCerts) -} - -func searchUncheckedDomains(domains []string, certs map[string]*tls.Certificate) []string { - var uncheckedDomains []string - for _, domainToCheck := range domains { - if !isDomainAlreadyChecked(domainToCheck, certs) { - uncheckedDomains = append(uncheckedDomains, domainToCheck) - } - } - - if len(uncheckedDomains) == 0 { - log.Debugf("No ACME certificate to generate for domains %q.", domains) - } else { - log.Debugf("Domains %q need ACME certificates generation for domains %q.", domains, strings.Join(uncheckedDomains, ",")) - } - return uncheckedDomains -} - -func (a *ACME) getDomainsCertificates(domains []string) (*Certificate, error) { - var cleanDomains []string - for _, domain := range domains { - canonicalDomain := types.CanonicalDomain(domain) - cleanDomain := dns01.UnFqdn(canonicalDomain) - if canonicalDomain != cleanDomain { - log.Warnf("FQDN detected, please remove the trailing dot: %s", canonicalDomain) - } - cleanDomains = append(cleanDomains, cleanDomain) - } - - log.Debugf("Loading ACME certificates %s...", cleanDomains) - bundle := true - - request := certificate.ObtainRequest{ - Domains: cleanDomains, - Bundle: bundle, - MustStaple: OSCPMustStaple, - } - - cert, err := a.client.Certificate.Obtain(request) - if err != nil { - return nil, fmt.Errorf("cannot obtain certificates: %+v", err) - } - - log.Debugf("Loaded ACME certificates %s", cleanDomains) - return &Certificate{ - Domain: cert.Domain, - CertURL: cert.CertURL, - CertStableURL: cert.CertStableURL, - PrivateKey: cert.PrivateKey, - Certificate: cert.Certificate, - }, nil -} - -func (a *ACME) runJobs() { - safe.Go(func() { - for job := range a.jobs.Out() { - function := job.(func()) - function() - } - }) -} - -// getValidDomains checks if given domain is allowed to generate a ACME certificate and return it -func (a *ACME) getValidDomains(domains []string, wildcardAllowed bool) ([]string, error) { - // Check if the domains array is empty or contains only one empty value - if 
len(domains) == 0 || (len(domains) == 1 && len(domains[0]) == 0) { - return nil, errors.New("unable to generate a certificate when no domain is given") - } - - if strings.HasPrefix(domains[0], "*") { - if !wildcardAllowed { - return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q from a 'Host' rule", strings.Join(domains, ",")) - } - - if a.DNSChallenge == nil { - return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : ACME needs a DNSChallenge", strings.Join(domains, ",")) - } - if strings.HasPrefix(domains[0], "*.*") { - return nil, fmt.Errorf("unable to generate a wildcard certificate for domain %q : ACME does not allow '*.*' wildcard domain", strings.Join(domains, ",")) - } - } - for _, san := range domains[1:] { - if strings.HasPrefix(san, "*") { - return nil, fmt.Errorf("unable to generate a certificate for domains %q: SANs can not be a wildcard domain", strings.Join(domains, ",")) - - } - } - - domains = fun.Map(types.CanonicalDomain, domains).([]string) - return domains, nil -} - -func isDomainAlreadyChecked(domainToCheck string, existentDomains map[string]*tls.Certificate) bool { - for certDomains := range existentDomains { - for _, certDomain := range strings.Split(certDomains, ",") { - if types.MatchDomain(domainToCheck, certDomain) { - return true - } - } - } - return false -} - -// deleteUnnecessaryDomains deletes from the configuration : -// - Duplicated domains -// - Domains which are checked by wildcard domain -func (a *ACME) deleteUnnecessaryDomains() { - var newDomains []types.Domain - - for idxDomainToCheck, domainToCheck := range a.Domains { - keepDomain := true - - for idxDomain, domain := range a.Domains { - if idxDomainToCheck == idxDomain { - continue - } - - if reflect.DeepEqual(domain, domainToCheck) { - if idxDomainToCheck > idxDomain { - log.Warnf("The domain %v is duplicated in the configuration but will be process by ACME only once.", domainToCheck) - keepDomain = false - } - break - } - - var newDomainsToCheck []string - - // Check if domains can be validated by the wildcard domain - domainsMap := make(map[string]*tls.Certificate) - domainsMap[domain.Main] = &tls.Certificate{} - if len(domain.SANs) > 0 { - domainsMap[strings.Join(domain.SANs, ",")] = &tls.Certificate{} - } - - for _, domainProcessed := range domainToCheck.ToStrArray() { - if idxDomain < idxDomainToCheck && isDomainAlreadyChecked(domainProcessed, domainsMap) { - // The domain is duplicated in a CN - log.Warnf("Domain %q is duplicated in the configuration or validated by the domain %v. 
It will be processed once.", domainProcessed, domain) - continue - } else if domain.Main != domainProcessed && strings.HasPrefix(domain.Main, "*") && types.MatchDomain(domainProcessed, domain.Main) { - // Check if a wildcard can validate the domain - log.Warnf("Domain %q will not be processed by ACME provider because it is validated by the wildcard %q", domainProcessed, domain.Main) - continue - } - newDomainsToCheck = append(newDomainsToCheck, domainProcessed) - } - - // Delete the domain if both Main and SANs can be validated by the wildcard domain - // otherwise keep the unchecked values - if newDomainsToCheck == nil { - keepDomain = false - break - } - domainToCheck.Set(newDomainsToCheck) - } - - if keepDomain { - newDomains = append(newDomains, domainToCheck) - } - } - - a.Domains = newDomains -} diff --git a/old/acme/acme_example.json b/old/acme/acme_example.json deleted file mode 100644 index 8ebcbfd20..000000000 --- a/old/acme/acme_example.json +++ /dev/null @@ -1,43 +0,0 @@ -{ - "Email": "test@traefik.io", - "Registration": { - "body": { - "resource": "reg", - "id": 3, - "key": { - "kty": "RSA", - "n": "y5a71suIqvEtovDmDVQ3SSNagk5IVCFI_TvqWpEXSrdbcDE2C-PTEtEUJuLkYwygcpiWYbPmXgdS628vQCw5Uo4DeDyHiuysJOWBLaWow3p9goOdhnPbGBq0liIR9xXyRoctdipVk8UiO9scWsu4jMBM3sMr7_yBWPfYYiLEQmZGFO3iE7Oqr55h_kncHIj5lUQY1j_jkftqxlxUB5_0quyJ7l915j5QY--eY7h4GEhRvx0TlUpi-CnRtRblGeDDDilXZD6bQN2962WdKecsmRaYx-ttLz6jCPXz2VDJRWNcIS501ne2Zh3hzw_DS6IRd2GIia1Wg4sisi9epC9sumXPHi6xzR6-_i_nsFjdtTkUcV8HmorOYoc820KQVZaLScxa8e7-ixpOd6mr6AIbEf7dBAkb9f_iK3GwpqKD8yNcaj1EQgNSyJSjnKSulXI_GwkGnuXe00Qpb1a8ha5Z8yWg7XmZZnJyAZrmK60RfwRNQ1rO5ioerNUBJ2KYTYNzVjBdob9Ug6Cjh4bEKNNjqcbjQ50_Z97Vw40xzpDQ_fYllc6n92eSuv6olxFJTmK7EhHuanDzITngaqei3zL9RwQ7P-1jfEZ03qmGrQYYqXcsS46PQ8cE-frzY2mKp16pRNCG7-03gKVGV0JHyW1aYbevNUk7OumCAXhC2YOigBk", - "e": "AQAB" - }, - "contact": [ - "mailto:test@traefik.io" - ], - "agreement": "http://boulder:4000/terms/v1" - }, - "uri": "http://127.0.0.1:4000/acme/reg/3", - "new_authzr_uri": "http://127.0.0.1:4000/acme/new-authz", - "terms_of_service": "http://boulder:4000/terms/v1" - }, - "PrivateKey": 
"MIIJJwIBAAKCAgEAy5a71suIqvEtovDmDVQ3SSNagk5IVCFI/TvqWpEXSrdbcDE2C+PTEtEUJuLkYwygcpiWYbPmXgdS628vQCw5Uo4DeDyHiuysJOWBLaWow3p9goOdhnPbGBq0liIR9xXyRoctdipVk8UiO9scWsu4jMBM3sMr7/yBWPfYYiLEQmZGFO3iE7Oqr55h/kncHIj5lUQY1j/jkftqxlxUB5/0quyJ7l915j5QY++eY7h4GEhRvx0TlUpi+CnRtRblGeDDDilXZD6bQN2962WdKecsmRaYx+ttLz6jCPXz2VDJRWNcIS501ne2Zh3hzw/DS6IRd2GIia1Wg4sisi9epC9sumXPHi6xzR6+/i/nsFjdtTkUcV8HmorOYoc820KQVZaLScxa8e7+ixpOd6mr6AIbEf7dBAkb9f/iK3GwpqKD8yNcaj1EQgNSyJSjnKSulXI/GwkGnuXe00Qpb1a8ha5Z8yWg7XmZZnJyAZrmK60RfwRNQ1rO5ioerNUBJ2KYTYNzVjBdob9Ug6Cjh4bEKNNjqcbjQ50/Z97Vw40xzpDQ/fYllc6n92eSuv6olxFJTmK7EhHuanDzITngaqei3zL9RwQ7P+1jfEZ03qmGrQYYqXcsS46PQ8cE+frzY2mKp16pRNCG7+03gKVGV0JHyW1aYbevNUk7OumCAXhC2YOigBkCAwEAAQKCAgA8XW1EuwTC6tAFSDhuK1JZNUpY6K05hMUHkQRj5jFpzgQmt/C2hc7H/YZkIVJmrA/G6sdsINNlffZwKH9yH6q/d6w/snLeFl7UcdhjmIL5sxAT6sKCY0fLVd/FxERfZvp3Pw2Tw+mr7v+/j7BQm6cU1M/2HRiiB9SydIqMTpKyvXB6NC6ceOFbQTL9GxlQvKyEPbS/kiH/3vRB7I5d1GfPZmNfcp6ark9X0my8VK4HRSo36H8t/OhrfLrZXvh/O82aHVf0OTv/d8AgU/jNu+XVXoXegUfWglQFDChJf1KuaE+g5w1tqgFDNgkGRD475soXA6xgZi0Iw/B9tN3zALzT4IiAW1q72feeTgKOMA2zGtKXxQZZSOV+DuWFZNz/tT7XqGQThqxM09CHv2WGOe80vobtegXYTUt90hysrqIZmBW5XYdzQlJh1KWTtfCaTrWd47kbGvhkEPc8aA3Ji4/AqfkVXiqwaLu+MSlgzPpRj7U7UAIDqnpZjgttgLp74Ujnk3bTaUzdyyNqYDBG3IFGr/Sv+2GQDAUn/PYRJKWr0BteqOzX9zvW3zY8g9CYVXfK/AW3RMWLV8ly6vH/gWqa9gEuzRNRlzjUU6/HCVbUx3UT8RMWH2TQ0uuQZr5JX1iTwjeeT0dEIly1NnRQC92wcrE4UUTBEF3krGVpDBf0AQKCAQEA4jB8w+2fwzbF8X+gCODcY7sTeJRunzGy+jbdaLkcThuylga+6W3ZgWx0BD30ql9K2mouCVu86fCTnBeXXEC3QoTdgw/EzJ83+4JU3QSDdzs9Ta9vLHyvrpUkQfZ8UZpeLLmFsmsBMbBbnfw0S1TzXDsgrAc+G4tia8nO/Iqu75kEMGzmHQAvmN3iSqc1aTS4qumbB19g+v+csq9NEht4F9jt39KotG+OD3MxCxtMu7vxAkJRjFFcgcbb2Rtqe/kQEKA1vLEAJg27lV4k8XibCSerVUR6IzT8WZHrNiXmpRguTLl2k8uFUdCOOx6aLGyRVJ6+8SgIsMR540vnxwQzEQKCAQEA5mu2wtWT19mvXopC3easPsXIPzc5oaRkqfWZYT1KHcVQ7NIXsE3vCjcf/3igZ8l/FVQ4G4fpk/GoTqlpV5Aq/JHCpVOR2O69uB+W4kWgliejpHvF9gszzAYnC8lIXqDbWiinBhmm3ii8sDGAoBaSDw5NMUq3mI+nd8zZ+jx1bLBczDafmQ0YKr8k0YaROxIgoBgDOQDdSqG387lwzpza2DKI5Al3HfS42zjT0RmBahPiuT2aEoUZmIYuvFY0fEjfkpbdvLyexHfZCILRUGlG1nAwASFg86lp+mFSBJ3E3cvbP0CpbFGxon5u4Ao3/7htoOh6huh7MQ91h41fv1hsiQKCAQAe7WRR4e7jYVzlbX7zV9Oqq0y5QwpxJ/mB7viNNiphn7Xmf5uhDU0dPjgK0HHgzdDNVpFe5DVLg4KbaDpg+dRU+xfSsNhG5kpgUGzMH67eIbJ7Kc64tX/MDkZ74nkTK1lPIjrer3TlV2jfjDmWR1JTPR51hzP9ziwx8tEjhM7woeqJuIoqUvkvHL+xV3WdIgFSFUkGVAtNpp/FauTN4gWktRupbAN3UH2LLUP6ccwnK0aD+Y9u8T0F3av33qDLvL1umIlgeI89pMkOXmYMwmHoeY0axpcwszECCkqwB7SmxEyoXv+Qq9ZZ3ntkKAYKpvmkKWSQUtoFWYgVBS727mMRAoIBABLdwusU/bPwuPEutObiWjwRiaHTbb6UbUGVQGe70vO5EjUxxorC9s2JUe9i+w9EakleyfFHIZLheHxoVp26yio/7QYIX6q5cYM/4uTH+qwQts9i6wSISkdsQYovguNsnEk3huVy+Dy8bSaoBvYUowTkkOF2Uq4FJRskBLz+ckbh8dcuqcaoUdA+Mk+NixqhE1bIYIssTPItZ5hnGJtyMGD/UkIJnF0ximk4r+8w/W2oDypHpvPZPg1E/1KgZE/Az7166NDpSL6haX3O6ECDPi+Uo/mTuBJ7TpgXm9WQ7WuTo3H8Y2LhFYBOhdmGPKuNeDxyjIW7R0rvDxp4MtzB6rECggEAJIl7/qp1lxUQPQJRTsEYBkOtdRw0IGG1Rcj0emhHaBN05c9opCy+Osb7mVeU5ZiULe5kD02phL+36pEumprz7QzN46Y5pZc8AQ2W/QkeL4Wo9U9QzczvQQzc1EqrBkzvQTZtBhn4DRzz0IuTn1beVyHtBZeNpBFgMQFv9VYQuUNwFoTOkkQrBRnYbXH6KEnhF3c/1Hzi4KHVdHdfZ3LH7KFQJ34xio0q2tWQSQYeybmwOXdd9sxpz/Y4KBS9fqm7UrwnPK8yuOc05HLEaws+1iam5YyJprlQo3mGKe0wRztwn44HDeQr70LlFm0lzigVAv0hSiWO1Q5hJL7nDu8m/Q==", - "DomainsCertificate": { - "Certs": [ - { - "Domains": { - "Main": "local1.com", - "SANs": [ - "test1.local1.com", - "test2.local1.com" - ] - }, - "Certificate": { - "Domain": "local1.com", - "CertURL": "http://127.0.0.1:4000/acme/cert/ffc4f3f14def9ee6ec6a0522b5c0baa3379d", - "CertStableURL": "", - "PrivateKey": 
"LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlKS1FJQkFBS0NBZ0VBdVNoTTR4enF6cE5YcFNaNnAvZnQrRmt5VmgyK1BSZXJUelV0OERRSng2UkVjQS9FCnN2RnNIVmNOSkZMS2twYTNlOEd3SUZBakJQNnJPK3hoR1JjWlJrdENON1gyOW5LZFhGbHZkYzJxd0hyTFF5WWkKTTB3ODhTck41VERiNi96TWU2dTB0dERiYWtDbDd6ZEJKUXJ6a1h5ZU1MeVkzTUs3aVkrMHpwL2JqMVhvbk5DdQpaQStkZ3hsMVNrV01DVUYvQk9HNWFyT1hwb0x4S0dQWGdzV3hOTVNLVmJKSHczL3ZqNTViZU92Um5lT3BNWlhvCmMwOWpZT3VBakNka1Z5czBSWHJLNWNCRDRMbVRXdnN4MFdTK2VMVHlGTTdQTHVZM3lEWkNNWEhjVmlqRHhnbFMKYjB1ZVRQcGFUWEQwYkxqZ0RNOUVEdE15ZEJzMUNPWlpPWG9ickN5Q2I1eWxTOFdVd1NzVXM1UldxZnlVbnAvcgpSNGx2c2RZOWRVZjRPdkNMVnJvWWk5NWFGc1Zxa0xLOExuL0Eyc3kxYWlDTnR4RmpKOXRXbWU0V0NhdzRoU0YvCkR4NWVNNWNYR2JSYXduVlZJQlZXeHhzNTBPMFJlUWRvbXBQZEFNS1RDWk9SRmxYaDdOWTdxQVdWRGtpdzhyam8Kekd3Ni9XdjlOR3hTNTliKzc0YVAxcjBxOTZ2RS9Rdi8zTCtjbjhiN0lBLytPYmFKdzhIT3RGbXc4RjBxQkN3MAprYWVVSloxb1JueGFYQUo4RHhHREpFOVdNUzh0QmJtVm16YkxoRkMzeDdVc0xGeTBrSzh1SFBFT3dQb2NKNUFUCkE1UHBvclNEMmFleHA0Z3VqYVp5c1JManpmY0dnaTdva0JFNlZVNWVqRE1iYS9lNERQNEJQUVg5VmtVQ0F3RUEKQVFLQ0FnQmZjMWdYcUp1ZmZMT3REcVlpbXh4UmIrSVVKT2NpWldaSndmZDVvY244NGtEcHFDZFZ2RUZvNnF4NgpzamQ5MURhb2xOUHdCSC9aSGxRMTR3aTNQNEluQzdzS0wwTXVEeTN5SXFUa0RPOWVwSzdPWWdVMWZyTFgvS0lCCjZlc2x2Ny9HYldFTzhhSjdKdktqM0U4NEFtcEg4UDgzenJIYTlJUnJTT3NEcmNNcEpEZHpSOXp1OW1IVDZMYmYKWC9UdC9KYTNkSW42YUxUZ0FSYkRKSjAvN0J3TFFOcXpqT0dUOWdzUWRhbGdMK2x5eEo4L1ViRndhRmVwNmgzdApvbzBHcHQ0ZWgwdTdueDhlNVd3Q2RnWmJsTnpnS3grMC9Gd3dLRHhQZVRFc2ZpOEJONmlkR2NjbVdzd3prTWdtCnJmbERaeGNSWTNRSlZIVHBCL0dTTWZXRFBPQ3dRdGltQk1WN3kxM2hPMTdPWXpSNDBMZnpUalJBbmtna2V2eWYKcFowb3dLR3o4QS9haHhRWWJmYVQ5VEhXV0wrYUpYeUhFanBKckp5aTg3UExVbzhsOFVydU56MDRWNXpLOFJPbgo2cG9EWmVtbm1EYWRlU09pK3hZRWlGT1NwSXNWbzlpcm9jUGFKN2YzYWpiNUU4RHpuN1o1MmhzL2R6akpLcFZJCm5mVDFkUU9SZEowSXRUNlRlQ2RTL0dpS25IS1RtNjR2T21IbmlJcm8rUGRhUmFjV0IrTUJ0VytRd0cyUStyRGkKc3g4NlpQbHRpTVpLMDZ5TVlyVHZUdGk2aFVGaUY5cWh4b3RGazdNQkNrZlIwYUVhaUREQUpKNm1jb1lpRUQ2QgpBVGJhVmpVaGNaUiswYkRST25PN0ozRk5rZmx3K2dMaVhvcXFRRW9pU2ZWb2h5SWY3UUtDQVFFQThjYTM5K0g4CjN3L2Qrcm0yUGNhM0RMQnBYaWU4Z3ZYcGpjazVYSkpvSGVmbnJjZWQrcFpXaTZEYncwYld0MEdtYkxmVjJNSlAKV2I1aTZzSXhmdkN3YlFqbHY0UnExMVA5ZEswT3poMnVpKzZ6cXVBMG5YTVcrN0lJS0cvdDhmS2NJZGRRNnRGcwpFclFVTFBDak56ODA2cHBiSlhPRmVvMW1BK293TGhHNlA3dDhCdlZHSk1NaTNxejNlSUNuVVE2eDNFY01ITXNuClhrM21DUzI1WUZaNk96cytFK254cGVraTAzZmQwblp3UE1jdElHZys1c3hleE9zREsrTHlvb2FqQnc5N0oyUzIKcUNNWXFtT0tLcmxEQ3Y1WmQ4dlZLN3hXVmpKRVhGTTNMZ2pieHBRcCtuVXNVVWxwS01LOVlGS0lRREl0RU9aMApWcWExTXJaOElzN1l5d0tDQVFFQXhBemZIa2pIVGlvTHdZbG5EcEk0MWlOTDh5Y0ZBallrTC94dWhPU2tlVkE4CjdRWDZPZUpDekR3Z0FUYXVqOWR6Y0wwby9yTndWV0xWcnQ3OXk3YnJvVDdFREZKWVNTY25GRXNMTlVWSXRncGkKckNSUXJTL1F2TkVGTmE5K0pRc1dmYkdBNHdIUTFaSjI4MFp1cWMvNlEyUi9kZVh3cUZBQVBHN2NIcEhHWlR6ZQoyRmFRUHFLRkV4WlEyZkpvRys0SVBRNHVQVERybXlGMmVUWXk2T3BaaDBHbWJRYlVTa1dFWDlQRmF1cHJIWVdGCk8wK25DaVVPNVRaMFZoaGR2dUNKMWdPclZHYzhBUlJtUVZ1aUNEWTZCaGlvVTU0ZmZsSXlDTXZ5a3MwcmRXZ3MKWVJ2TmN4TXNlRGJpTDRKSURkMHhiN1d4VUdmVjRVNHZPMks5Vms1N0x3S0NBUUVBMkd1eE1jcXd1RnRUc0tPYwpaaUFDcXZFZTRKRmhSVGtySHlnSW1MelZSaS9ZU3M1c3MycnZmWDA0T3N5bVZ0UUZUVHdoeUMzbktjWXFkVW52ClZGblBFMHJyblV2Qzk0elBUQ205SHZPaTBzK1JORndOdlFMUWgrME5NR1ZBOFZyaU44aXRQZ1RJWU5XaFdianQKNFA1TE45V0QwVHBmT1J4cFBRZmNxT0JsZjdjcmhtNzNvdUNwemZtMmE3OStCaWpKUFF5NzR1cFhDeXRmeHNlUApNSlU0Uk56NjdJaDFMclpKM2xGbDFvYitZT2xKazhDOHpZd1RLT0hWck9zeGxobyt4SXN2Q2t3MDFMelZ6Mi9hCnRmT3Y5NTlHSnQzbXE0ZWpJUFZPQy9iUlpmdTMvMEdSY2dpQTZ5SnpaM0VxWTVaOU1EbTU3VzdjcE5RRlRxZmEKNXEyUmtRS0NBUUErNGhZSzQ3TXg2aUNkTWxKaEJSdS82OUJucktOWm96NFdPalRFNFlXejk3MmpGU0Mrd2tsRQpzeUJjNDBvNGp4WFRHb2wwc04rZU03WndnY3dNTko3OXVHRXZ4cFhVMlA4YTdqc3BHaEVKZXVsTlo5U015R0orCnZkaWE4TEJZZDJiK2FCbjhOay9pd1Rqd0xTNC92NXI1Vk
5uaFdpRElDK2tYZVVPWGRwQ1pWbDN3TEV2V0cxRHQKMzJHTmxzZzM5VENsVE5BZUJudjc1VTdYOEQrQ0gvRVpoa0E0aGxFL2hXN0JRZTczclRzd1creHhLc3BjWWFpVwpjdEg3NzVMYUw3Rm1lUVRTYk01OVZpcTZXZ2J0OVY3Rko5R09DSkQzZHF2ZjBITDlEVndjSzQ3WWt3OWlFc3RYCnY5cnEvREhhYUpGNzBGNlFlTTNNbDhSa212WTZJYkEzQW9JQkFRRGt6RmZLeG9HQ3dWUDlua3k4NmFQSjFvd2kKc2FDZEx6RjRWTENRZzkrUXJITzEyY0p5MFFQUnJ2cUQyMGp1cDFlOWJhWVZzbkdYc1FZTFg2NVR6UzJSSCtlSAp6S0NPTTdnMVE3djMxNWpjMDMvN1lQck4rb3RrV0VBOUkyaDZjUE1vY3c0aERTNk02OFlxQVlKTS9RclVhenZhCnhBTFJaZEVkQW1xWDA4VHhuY1hRUEVxYkk0ZnlSZ2pVM1BYR3RRaFFFbERpR2kwbThjQTJNTXdsR1RmbTdOSXgKaENjZ2ZkL296TEp2VUhiMkxLRi82cXEySmJVRHlOMkVoK0xSZUJjdnp6Y1grZE5MdGQxY0Uvcm1SM2hMbWxmNgo3KzRpTVMxK0t1eWV3VlJVUEE1c1F1aUYyVUVoeEs1MUpZK1FpOG9HbERKdGRrOXB3QlZNN1F0WW9KVEwKLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K", - "Certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZvakNDQklxZ0F3SUJBZ0lUQVAvRTgvRk43NTdtN0dvRklyWEF1cU0zblRBTkJna3Foa2lHOXcwQkFRc0YKQURBZk1SMHdHd1lEVlFRRERCUm9NbkJ3ZVNCb01tTnJaWElnWm1GclpTQkRRVEFlRncweE9EQXhNVFV3TnpJNQpNREJhRncweE9EQTBNVFV3TnpJNU1EQmFNRVF4RXpBUkJnTlZCQU1UQ214dlkyRnNNUzVqYjIweExUQXJCZ05WCkJBVVRKR1ptWXpSbU0yWXhOR1JsWmpsbFpUWmxZelpoTURVeU1tSTFZekJpWVdFek16YzVaRENDQWlJd0RRWUoKS29aSWh2Y05BUUVCQlFBRGdnSVBBRENDQWdvQ2dnSUJBTGtvVE9NYzZzNlRWNlVtZXFmMzdmaFpNbFlkdmowWApxMDgxTGZBMENjZWtSSEFQeExMeGJCMVhEU1JTeXBLV3QzdkJzQ0JRSXdUK3F6dnNZUmtYR1VaTFFqZTE5dlp5Cm5WeFpiM1hOcXNCNnkwTW1Jak5NUFBFcXplVXcyK3Y4ekh1cnRMYlEyMnBBcGU4M1FTVUs4NUY4bmpDOG1OekMKdTRtUHRNNmYyNDlWNkp6UXJtUVBuWU1aZFVwRmpBbEJmd1RodVdxemw2YUM4U2hqMTRMRnNUVEVpbFd5UjhOLwo3NCtlVzNqcjBaM2pxVEdWNkhOUFkyRHJnSXduWkZjck5FVjZ5dVhBUStDNWsxcjdNZEZrdm5pMDhoVE96eTdtCk44ZzJRakZ4M0ZZb3c4WUpVbTlMbmt6NldrMXc5R3k0NEF6UFJBN1RNblFiTlFqbVdUbDZHNndzZ20rY3BVdkYKbE1FckZMT1VWcW44bEo2ZjYwZUpiN0hXUFhWSCtEcndpMWE2R0l2ZVdoYkZhcEN5dkM1L3dOck10V29namJjUgpZeWZiVnBudUZnbXNPSVVoZnc4ZVhqT1hGeG0wV3NKMVZTQVZWc2NiT2REdEVYa0hhSnFUM1FEQ2t3bVRrUlpWCjRleldPNmdGbFE1SXNQSzQ2TXhzT3Yxci9UUnNVdWZXL3UrR2o5YTlLdmVyeFAwTC85eS9uSi9HK3lBUC9qbTIKaWNQQnpyUlpzUEJkS2dRc05KR25sQ1dkYUVaOFdsd0NmQThSZ3lSUFZqRXZMUVc1bFpzMnk0UlF0OGUxTEN4Ywp0SkN2TGh6eERzRDZIQ2VRRXdPVDZhSzBnOW1uc2FlSUxvMm1jckVTNDgzM0JvSXU2SkFST2xWT1hvd3pHMnYzCnVBeitBVDBGL1ZaRkFnTUJBQUdqZ2dHd01JSUJyREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdIUVlEVlIwbEJCWXcKRkFZSUt3WUJCUVVIQXdFR0NDc0dBUVVGQndNQ01Bd0dBMVVkRXdFQi93UUNNQUF3SFFZRFZSME9CQllFRk5LZQpBVUZYc2Z2N2lML0lYVVBXdzY2ZU5jQnhNQjhHQTFVZEl3UVlNQmFBRlB0NFR4TDVZQldETEo4WGZ6UVpzeTQyCjZrR0pNR1lHQ0NzR0FRVUZCd0VCQkZvd1dEQWlCZ2dyQmdFRkJRY3dBWVlXYUhSMGNEb3ZMekV5Tnk0d0xqQXUKTVRvME1EQXlMekF5QmdnckJnRUZCUWN3QW9ZbWFIUjBjRG92THpFeU55NHdMakF1TVRvME1EQXdMMkZqYldVdgphWE56ZFdWeUxXTmxjblF3T1FZRFZSMFJCREl3TUlJS2JHOWpZV3d4TG1OdmJZSVFkR1Z6ZERFdWJHOWpZV3d4CkxtTnZiWUlRZEdWemRESXViRzlqWVd3eExtTnZiVEFuQmdOVkhSOEVJREFlTUJ5Z0dxQVloaFpvZEhSd09pOHYKWlhoaGJYQnNaUzVqYjIwdlkzSnNNR0VHQTFVZElBUmFNRmd3Q0FZR1o0RU1BUUlCTUV3R0F5b0RCREJGTUNJRwpDQ3NHQVFVRkJ3SUJGaFpvZEhSd09pOHZaWGhoYlhCc1pTNWpiMjB2WTNCek1COEdDQ3NHQVFVRkJ3SUNNQk1NCkVVUnZJRmRvWVhRZ1ZHaHZkU0JYYVd4ME1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ3A0Q2FxZlR4THNQTzQKS2JueDJZdEc4bTN3MC9keTVVR1VRNjZHbGxPVTk0L2I0MmNhbTRuNUZrTWlpZ01IaUx4c2JZVXh0cDZKQ3R5cQpLKzFNcDFWWEtSTTVKbFBTNWRIaWhxdHk1U3BrTUhjampwQSs3U2YyVWtoNmpKRWYxTUVJY2JnWnpJRk5IT0hYClVUUUppVFhKcno3blJDZnlQWFZtbWErUGtIRlU4R0VEVzJGOVptU1kzVFBiQWhiWkV2UkZubjUrR1lxbkZuancKWWw3Y0I2MXYwRzVpOGQwbnVvbTB4a2hiNTU3Y3BiZHhLblhsaFU4N2RZSTR5SUdPdUFGUWpYcXFXN2NIZCtXUQpWSDB2dFA3cEgrRmt2YnY4WkkxMHMrNU5ZcCtzZjFQZGQxekJsRmdNSGF3dnFFYUg3SU9sejdkajlCdmtVc0dpClhxQWVqQnFPCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVpakNDQTNLZ0F3SUJBZ0lDRWswd0RRWUpLb1pJ
aHZjTkFRRUxCUUF3S3pFcE1DY0dBMVVFQXd3Z1kyRmoKYTJ4cGJtY2dZM0o1Y0hSdlozSmhjR2hsY2lCbVlXdGxJRkpQVDFRd0hoY05NVFV4TURJeE1qQXhNVFV5V2hjTgpNakF4TURFNU1qQXhNVFV5V2pBZk1SMHdHd1lEVlFRREV4Um9ZWEJ3ZVNCb1lXTnJaWElnWm1GclpTQkRRVENDCkFTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTUlLUjNtYUJjVVNzbmNYWXpRVDEzRDUKTnIrWjNtTHhNTWgzVFVkdDZzQUNtcWJKMGJ0UmxnWGZNdE5MTTJPVTFJNmEzSnUrdElaU2RuMnYyMUpCd3Z4VQp6cFpRNHp5MmNpbUlpTVFEWkNRSEp3ekM5R1puOEhhVzA5MWl6OUgwR28zQTdXRFh3WU5tc2RMTlJpMDBvMTRVCmpvYVZxYVBzWXJaV3ZSS2FJUnFhVTBoSG1TMEFXd1FTdk4vOTNpTUlYdXlpd3l3bWt3S2JXbm54Q1EvZ3NjdEsKRlV0Y05yd0V4OVdnajZLbGh3RFR5STFRV1NCYnhWWU55VWdQRnpLeHJTbXdNTzB5TmZmN2hvK1FUOXg1K1kvNwpYRTU5UzRNYzRaWHhjWEtldy9nU2xOOVU1bXZUK0QyQmhEdGtDdXBkZnNaTkNRV3AyN0ErYi9EbXJGSTlOcXNDCkF3RUFBYU9DQWNJd2dnRytNQklHQTFVZEV3RUIvd1FJTUFZQkFmOENBUUF3UXdZRFZSMGVCRHd3T3FFNE1BYUMKQkM1dGFXd3dDb2NJQUFBQUFBQUFBQUF3SW9jZ0FBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQpBQUFBQUFBd0RnWURWUjBQQVFIL0JBUURBZ0dHTUg4R0NDc0dBUVVGQndFQkJITXdjVEF5QmdnckJnRUZCUWN3CkFZWW1hSFIwY0RvdkwybHpjbWN1ZEhKMWMzUnBaQzV2WTNOd0xtbGtaVzUwY25WemRDNWpiMjB3T3dZSUt3WUIKQlFVSE1BS0dMMmgwZEhBNkx5OWhjSEJ6TG1sa1pXNTBjblZ6ZEM1amIyMHZjbTl2ZEhNdlpITjBjbTl2ZEdOaAplRE11Y0Rkak1COEdBMVVkSXdRWU1CYUFGT21rUCs2ZXBlYnkxZGQ1WUR5VHBpNGtqcGVxTUZRR0ExVWRJQVJOCk1Fc3dDQVlHWjRFTUFRSUJNRDhHQ3lzR0FRUUJndDhUQVFFQk1EQXdMZ1lJS3dZQkJRVUhBZ0VXSW1oMGRIQTYKTHk5amNITXVjbTl2ZEMxNE1TNXNaWFJ6Wlc1amNubHdkQzV2Y21jd1BBWURWUjBmQkRVd016QXhvQytnTFlZcgphSFIwY0RvdkwyTnliQzVwWkdWdWRISjFjM1F1WTI5dEwwUlRWRkpQVDFSRFFWZ3pRMUpNTG1OeWJEQWRCZ05WCkhRNEVGZ1FVKzNoUEV2bGdGWU1zbnhkL05CbXpMamJxUVlrd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBMFkKQWVMWE9rbHg0aGhDaWtVVWwrQmRuRmZuMWcwVzVBaVFMVk5JT0w2UG5xWHUwd2puaE55aHFkd25maFlNbm95NAppZFJoNGxCNnB6OEdmOXBubExkL0RuV1NWM2dTKy9JL21BbDFkQ2tLYnk2SDJWNzkwZTZJSG1JSzJLWW0zam0rClUrK0ZJZEdwQmRzUVRTZG1pWC9yQXl1eE1ETTBhZE1rTkJ3VGZRbVpRQ3o2bkdIdzFRY1NQWk12WnBzQzhTa3YKZWt6eHNqRjFvdE9yTVVQTlBRdnRUV3JWeDhHbFIycWZ4LzR4YlFhMXYyZnJOdkZCQ21PNTlnb3oram5XdmZUdApqMk5qd0RaN3ZsTUJzUG0xNmRiS1lDODQwdXZSb1pqeHFzZGMzQ2hDWmpxaW1GcWxORy94b1BBOCtkVGljWnpDClhFOWlqUEljdlc2eTFhYTNiR3c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K" - } - } - ] - }, - "ChallengeCerts": {} -} diff --git a/old/acme/acme_test.go b/old/acme/acme_test.go deleted file mode 100644 index a2cdf2191..000000000 --- a/old/acme/acme_test.go +++ /dev/null @@ -1,807 +0,0 @@ -package acme - -import ( - "crypto/tls" - "encoding/base64" - "net/http" - "net/http/httptest" - "reflect" - "sort" - "sync" - "testing" - "time" - - acmeprovider "github.com/containous/traefik/pkg/provider/acme" - "github.com/containous/traefik/pkg/tls/generate" - "github.com/containous/traefik/pkg/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDomainsSet(t *testing.T) { - testCases := []struct { - input string - expected types.Domains - }{ - { - input: "", - expected: types.Domains{}, - }, - { - input: "foo1.com", - expected: types.Domains{ - types.Domain{Main: "foo1.com"}, - }, - }, - { - input: "foo2.com,bar.net", - expected: types.Domains{ - types.Domain{ - Main: "foo2.com", - SANs: []string{"bar.net"}, - }, - }, - }, - { - input: "foo3.com,bar1.net,bar2.net,bar3.net", - expected: types.Domains{ - types.Domain{ - Main: "foo3.com", - SANs: []string{"bar1.net", "bar2.net", "bar3.net"}, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.input, func(t *testing.T) { - t.Parallel() - - domains := types.Domains{} - _ = domains.Set(test.input) - assert.Exactly(t, test.expected, domains) - }) - } -} - -func TestDomainsSetAppend(t 
*testing.T) { - testCases := []struct { - input string - expected types.Domains - }{ - { - input: "", - expected: types.Domains{}, - }, - { - input: "foo1.com", - expected: types.Domains{ - types.Domain{Main: "foo1.com"}, - }, - }, - { - input: "foo2.com,bar.net", - expected: types.Domains{ - types.Domain{Main: "foo1.com"}, - types.Domain{ - Main: "foo2.com", - SANs: []string{"bar.net"}, - }, - }, - }, - { - input: "foo3.com,bar1.net,bar2.net,bar3.net", - expected: types.Domains{ - types.Domain{Main: "foo1.com"}, - types.Domain{ - Main: "foo2.com", - SANs: []string{"bar.net"}, - }, - types.Domain{ - Main: "foo3.com", - SANs: []string{"bar1.net", "bar2.net", "bar3.net"}, - }, - }, - }, - } - - // append to - domains := types.Domains{} - for _, test := range testCases { - t.Run(test.input, func(t *testing.T) { - - _ = domains.Set(test.input) - assert.Exactly(t, test.expected, domains) - }) - } -} - -func TestCertificatesRenew(t *testing.T) { - foo1Cert, foo1Key, _ := generate.KeyPair("foo1.com", time.Now()) - foo2Cert, foo2Key, _ := generate.KeyPair("foo2.com", time.Now()) - - domainsCertificates := DomainsCertificates{ - lock: sync.RWMutex{}, - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo1.com"}, - Certificate: &Certificate{ - Domain: "foo1.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo1Key, - Certificate: foo1Cert, - }, - }, - { - Domains: types.Domain{ - Main: "foo2.com"}, - Certificate: &Certificate{ - Domain: "foo2.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo2Key, - Certificate: foo2Cert, - }, - }, - }, - } - - foo1Cert, foo1Key, _ = generate.KeyPair("foo1.com", time.Now()) - newCertificate := &Certificate{ - Domain: "foo1.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo1Key, - Certificate: foo1Cert, - } - - err := domainsCertificates.renewCertificates(newCertificate, types.Domain{Main: "foo1.com"}) - if err != nil { - t.Errorf("Error in renewCertificates :%v", err) - } - - if len(domainsCertificates.Certs) != 2 { - t.Errorf("Expected domainsCertificates length %d %+v\nGot %+v", 2, domainsCertificates.Certs, len(domainsCertificates.Certs)) - } - - if !reflect.DeepEqual(domainsCertificates.Certs[0].Certificate, newCertificate) { - t.Errorf("Expected new certificate %+v \nGot %+v", newCertificate, domainsCertificates.Certs[0].Certificate) - } -} - -func TestRemoveDuplicates(t *testing.T) { - now := time.Now() - fooCert, fooKey, _ := generate.KeyPair("foo.com", now) - foo24Cert, foo24Key, _ := generate.KeyPair("foo.com", now.Add(24*time.Hour)) - foo48Cert, foo48Key, _ := generate.KeyPair("foo.com", now.Add(48*time.Hour)) - barCert, barKey, _ := generate.KeyPair("bar.com", now) - domainsCertificates := DomainsCertificates{ - lock: sync.RWMutex{}, - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.com"}, - Certificate: &Certificate{ - Domain: "foo.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo24Key, - Certificate: foo24Cert, - }, - }, - { - Domains: types.Domain{ - Main: "foo.com"}, - Certificate: &Certificate{ - Domain: "foo.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo48Key, - Certificate: foo48Cert, - }, - }, - { - Domains: types.Domain{ - Main: "foo.com"}, - Certificate: &Certificate{ - Domain: "foo.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: fooKey, - Certificate: fooCert, - }, - }, - { - Domains: types.Domain{ - Main: "bar.com"}, - Certificate: &Certificate{ - Domain: "bar.com", - CertURL: "url", - CertStableURL: "url", - 
PrivateKey: barKey, - Certificate: barCert, - }, - }, - { - Domains: types.Domain{ - Main: "foo.com"}, - Certificate: &Certificate{ - Domain: "foo.com", - CertURL: "url", - CertStableURL: "url", - PrivateKey: foo48Key, - Certificate: foo48Cert, - }, - }, - }, - } - - err := domainsCertificates.Init() - require.NoError(t, err) - - if len(domainsCertificates.Certs) != 2 { - t.Errorf("Expected domainsCertificates length %d %+v\nGot %+v", 2, domainsCertificates.Certs, len(domainsCertificates.Certs)) - } - - for _, cert := range domainsCertificates.Certs { - switch cert.Domains.Main { - case "bar.com": - continue - case "foo.com": - if !cert.tlsCert.Leaf.NotAfter.Equal(now.Add(48 * time.Hour).Truncate(1 * time.Second)) { - t.Errorf("Bad expiration %s date for domain %+v, now %s", cert.tlsCert.Leaf.NotAfter.String(), cert, now.Add(48*time.Hour).Truncate(1*time.Second).String()) - } - default: - t.Errorf("Unknown domain %+v", cert) - } - } -} - -func TestAcmeClientCreation(t *testing.T) { - // Lengthy setup to avoid external web requests - oh for easier golang testing! - account := &Account{Email: "f@f"} - - account.PrivateKey, _ = base64.StdEncoding.DecodeString(` -MIIBPAIBAAJBAMp2Ni92FfEur+CAvFkgC12LT4l9D53ApbBpDaXaJkzzks+KsLw9zyAxvlrfAyTCQ -7tDnEnIltAXyQ0uOFUUdcMCAwEAAQJAK1FbipATZcT9cGVa5x7KD7usytftLW14heQUPXYNV80r/3 -lmnpvjL06dffRpwkYeN8DATQF/QOcy3NNNGDw/4QIhAPAKmiZFxA/qmRXsuU8Zhlzf16WrNZ68K64 -asn/h3qZrAiEA1+wFR3WXCPIolOvd7AHjfgcTKQNkoMPywU4FYUNQ1AkCIQDv8yk0qPjckD6HVCPJ -llJh9MC0svjevGtNlxJoE3lmEQIhAKXy1wfZ32/XtcrnENPvi6lzxI0T94X7s5pP3aCoPPoJAiEAl -cijFkALeQp/qyeXdFld2v9gUN3eCgljgcl0QweRoIc=---`) - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - _, err := w.Write([]byte(`{ - "GPHhmRVEDas": "https://community.letsencrypt.org/t/adding-random-entries-to-the-directory/33417", - "keyChange": "https://foo/acme/key-change", - "meta": { - "termsOfService": "https://boulder:4431/terms/v7" - }, - "newAccount": "https://foo/acme/new-acct", - "newNonce": "https://foo/acme/new-nonce", - "newOrder": "https://foo/acme/new-order", - "revokeCert": "https://foo/acme/revoke-cert" -}`)) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - })) - defer ts.Close() - - a := ACME{ - CAServer: ts.URL, - DNSChallenge: &acmeprovider.DNSChallenge{ - Provider: "manual", - DelayBeforeCheck: 10, - DisablePropagationCheck: true, - }, - } - - client, err := a.buildACMEClient(account) - if err != nil { - t.Errorf("Error in buildACMEClient: %v", err) - } - if client == nil { - t.Error("No client from buildACMEClient!") - } -} - -func TestAcme_getUncheckedCertificates(t *testing.T) { - mm := make(map[string]*tls.Certificate) - mm["*.containo.us"] = &tls.Certificate{} - mm["traefik.acme.io"] = &tls.Certificate{} - - dm := make(map[string]struct{}) - dm["*.traefik.wtf"] = struct{}{} - - a := ACME{TLSConfig: &tls.Config{NameToCertificate: mm}, resolvingDomains: dm} - - domains := []string{"traefik.containo.us", "trae.containo.us", "foo.traefik.wtf"} - uncheckedDomains := a.getUncheckedDomains(domains, nil) - assert.Empty(t, uncheckedDomains) - domains = []string{"traefik.acme.io", "trae.acme.io"} - uncheckedDomains = a.getUncheckedDomains(domains, nil) - assert.Len(t, uncheckedDomains, 1) - domainsCertificates := DomainsCertificates{Certs: []*DomainsCertificate{ - { - tlsCert: &tls.Certificate{}, - Domains: types.Domain{ - Main: "*.acme.wtf", - SANs: []string{"trae.acme.io"}, - }, - }, - }} - account := Account{DomainsCertificate: domainsCertificates} - 
uncheckedDomains = a.getUncheckedDomains(domains, &account) - assert.Empty(t, uncheckedDomains) - domains = []string{"traefik.containo.us", "trae.containo.us", "traefik.wtf"} - uncheckedDomains = a.getUncheckedDomains(domains, nil) - assert.Len(t, uncheckedDomains, 1) -} - -func TestAcme_getProvidedCertificate(t *testing.T) { - mm := make(map[string]*tls.Certificate) - mm["*.containo.us"] = &tls.Certificate{} - mm["traefik.acme.io"] = &tls.Certificate{} - - a := ACME{TLSConfig: &tls.Config{NameToCertificate: mm}} - - domain := "traefik.containo.us" - certificate := a.getProvidedCertificate(domain) - assert.NotNil(t, certificate) - domain = "trae.acme.io" - certificate = a.getProvidedCertificate(domain) - assert.Nil(t, certificate) -} - -func TestAcme_getValidDomain(t *testing.T) { - testCases := []struct { - desc string - domains []string - wildcardAllowed bool - dnsChallenge *acmeprovider.DNSChallenge - expectedErr string - expectedDomains []string - }{ - { - desc: "valid wildcard", - domains: []string{"*.traefik.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - wildcardAllowed: true, - expectedErr: "", - expectedDomains: []string{"*.traefik.wtf"}, - }, - { - desc: "no wildcard", - domains: []string{"traefik.wtf", "foo.traefik.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - expectedErr: "", - wildcardAllowed: true, - expectedDomains: []string{"traefik.wtf", "foo.traefik.wtf"}, - }, - { - desc: "unauthorized wildcard", - domains: []string{"*.traefik.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - wildcardAllowed: false, - expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf\" from a 'Host' rule", - expectedDomains: nil, - }, - { - desc: "no domain", - domains: []string{}, - dnsChallenge: nil, - wildcardAllowed: true, - expectedErr: "unable to generate a certificate when no domain is given", - expectedDomains: nil, - }, - { - desc: "no DNSChallenge", - domains: []string{"*.traefik.wtf", "foo.traefik.wtf"}, - dnsChallenge: nil, - wildcardAllowed: true, - expectedErr: "unable to generate a wildcard certificate for domain \"*.traefik.wtf,foo.traefik.wtf\" : ACME needs a DNSChallenge", - expectedDomains: nil, - }, - { - desc: "unauthorized wildcard with SAN", - domains: []string{"*.*.traefik.wtf", "foo.traefik.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - wildcardAllowed: true, - expectedErr: "unable to generate a wildcard certificate for domain \"*.*.traefik.wtf,foo.traefik.wtf\" : ACME does not allow '*.*' wildcard domain", - expectedDomains: nil, - }, - { - desc: "wildcard with SANs", - domains: []string{"*.traefik.wtf", "traefik.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - wildcardAllowed: true, - expectedErr: "", - expectedDomains: []string{"*.traefik.wtf", "traefik.wtf"}, - }, - { - desc: "unexpected SANs", - domains: []string{"*.traefik.wtf", "*.acme.wtf"}, - dnsChallenge: &acmeprovider.DNSChallenge{}, - wildcardAllowed: true, - expectedErr: "unable to generate a certificate for domains \"*.traefik.wtf,*.acme.wtf\": SANs can not be a wildcard domain", - expectedDomains: nil, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - a := ACME{} - if test.dnsChallenge != nil { - a.DNSChallenge = test.dnsChallenge - } - domains, err := a.getValidDomains(test.domains, test.wildcardAllowed) - - if len(test.expectedErr) > 0 { - assert.EqualError(t, err, test.expectedErr, "Unexpected error.") - } else { - assert.Equal(t, len(test.expectedDomains), len(domains), 
"Unexpected domains.") - } - }) - } -} - -func TestAcme_getCertificateForDomain(t *testing.T) { - testCases := []struct { - desc string - domain string - dc *DomainsCertificates - expected *DomainsCertificate - expectedFound bool - }{ - { - desc: "non-wildcard exact match", - domain: "foo.traefik.wtf", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.traefik.wtf", - }, - }, - }, - }, - expected: &DomainsCertificate{ - Domains: types.Domain{ - Main: "foo.traefik.wtf", - }, - }, - expectedFound: true, - }, - { - desc: "non-wildcard no match", - domain: "bar.traefik.wtf", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.traefik.wtf", - }, - }, - }, - }, - expected: nil, - expectedFound: false, - }, - { - desc: "wildcard match", - domain: "foo.traefik.wtf", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "*.traefik.wtf", - }, - }, - }, - }, - expected: &DomainsCertificate{ - Domains: types.Domain{ - Main: "*.traefik.wtf", - }, - }, - expectedFound: true, - }, - { - desc: "wildcard no match", - domain: "foo.traefik.wtf", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "*.bar.traefik.wtf", - }, - }, - }, - }, - expected: nil, - expectedFound: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got, found := test.dc.getCertificateForDomain(test.domain) - assert.Equal(t, test.expectedFound, found) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestRemoveEmptyCertificates(t *testing.T) { - now := time.Now() - fooCert, fooKey, _ := generate.KeyPair("foo.com", now) - acmeCert, acmeKey, _ := generate.KeyPair("acme.wtf", now.Add(24*time.Hour)) - barCert, barKey, _ := generate.KeyPair("bar.com", now) - testCases := []struct { - desc string - dc *DomainsCertificates - expectedDc *DomainsCertificates - }{ - { - desc: "No empty certificate", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Certificate: &Certificate{ - Certificate: fooCert, - PrivateKey: fooKey, - }, - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{ - Certificate: barCert, - PrivateKey: barKey, - }, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - expectedDc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Certificate: &Certificate{ - Certificate: fooCert, - PrivateKey: fooKey, - }, - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{ - Certificate: barCert, - PrivateKey: barKey, - }, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - }, - { - desc: "First certificate is nil", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{ - Certificate: barCert, - PrivateKey: barKey, - }, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - expectedDc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - 
Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{ - Certificate: nil, - PrivateKey: barKey, - }, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - }, - { - desc: "Last certificate is empty", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Certificate: &Certificate{ - Certificate: fooCert, - PrivateKey: fooKey, - }, - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{}, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - expectedDc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Certificate: &Certificate{ - Certificate: fooCert, - PrivateKey: fooKey, - }, - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - }, - }, - }, - { - desc: "First and last certificates are nil or empty", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - { - Certificate: &Certificate{}, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - expectedDc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Certificate: &Certificate{ - Certificate: acmeCert, - PrivateKey: acmeKey, - }, - Domains: types.Domain{ - Main: "acme.wtf", - }, - }, - }, - }, - }, - { - desc: "All certificates are nil or empty", - dc: &DomainsCertificates{ - Certs: []*DomainsCertificate{ - { - Domains: types.Domain{ - Main: "foo.com", - }, - }, - { - Domains: types.Domain{ - Main: "foo24.com", - }, - }, - { - Certificate: &Certificate{}, - Domains: types.Domain{ - Main: "bar.com", - }, - }, - }, - }, - expectedDc: &DomainsCertificates{ - Certs: []*DomainsCertificate{}, - }, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - a := &Account{DomainsCertificate: *test.dc} - err := a.Init() - require.NoError(t, err) - - assert.Equal(t, len(test.expectedDc.Certs), len(a.DomainsCertificate.Certs)) - sort.Sort(&a.DomainsCertificate) - sort.Sort(test.expectedDc) - for key, value := range test.expectedDc.Certs { - assert.Equal(t, value.Domains.Main, a.DomainsCertificate.Certs[key].Domains.Main) - } - }) - } -} diff --git a/old/acme/challenge_http_provider.go b/old/acme/challenge_http_provider.go deleted file mode 100644 index af078afc7..000000000 --- a/old/acme/challenge_http_provider.go +++ /dev/null @@ -1,102 +0,0 @@ -package acme - -import ( - "fmt" - "sync" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/cluster" - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/safe" - "github.com/go-acme/lego/challenge" -) - -var _ challenge.ProviderTimeout = (*challengeHTTPProvider)(nil) - -type challengeHTTPProvider struct { - store cluster.Store - lock sync.RWMutex -} - -func (c *challengeHTTPProvider) getTokenValue(token, domain string) []byte { - log.Debugf("Looking for an existing ACME challenge for token %v...", token) - c.lock.RLock() - defer c.lock.RUnlock() - - account := c.store.Get().(*Account) - if 
account.HTTPChallenge == nil { - return []byte{} - } - - var result []byte - operation := func() error { - var ok bool - if result, ok = account.HTTPChallenge[token][domain]; !ok { - return fmt.Errorf("cannot find challenge for token %v", token) - } - return nil - } - - notify := func(err error, time time.Duration) { - log.Errorf("Error getting challenge for token retrying in %s", time) - } - - ebo := backoff.NewExponentialBackOff() - ebo.MaxElapsedTime = 60 * time.Second - err := backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) - if err != nil { - log.Errorf("Error getting challenge for token: %v", err) - return []byte{} - } - return result -} - -func (c *challengeHTTPProvider) Present(domain, token, keyAuth string) error { - log.Debugf("Challenge Present %s", domain) - c.lock.Lock() - defer c.lock.Unlock() - - transaction, object, err := c.store.Begin() - if err != nil { - return err - } - - account := object.(*Account) - if account.HTTPChallenge == nil { - account.HTTPChallenge = map[string]map[string][]byte{} - } - - if _, ok := account.HTTPChallenge[token]; !ok { - account.HTTPChallenge[token] = map[string][]byte{} - } - - account.HTTPChallenge[token][domain] = []byte(keyAuth) - - return transaction.Commit(account) -} - -func (c *challengeHTTPProvider) CleanUp(domain, token, keyAuth string) error { - log.Debugf("Challenge CleanUp %s", domain) - c.lock.Lock() - defer c.lock.Unlock() - - transaction, object, err := c.store.Begin() - if err != nil { - return err - } - - account := object.(*Account) - if _, ok := account.HTTPChallenge[token]; ok { - delete(account.HTTPChallenge[token], domain) - if len(account.HTTPChallenge[token]) == 0 { - delete(account.HTTPChallenge, token) - } - } - - return transaction.Commit(account) -} - -func (c *challengeHTTPProvider) Timeout() (timeout, interval time.Duration) { - return 60 * time.Second, 5 * time.Second -} diff --git a/old/acme/challenge_tls_provider.go b/old/acme/challenge_tls_provider.go deleted file mode 100644 index 73612ed35..000000000 --- a/old/acme/challenge_tls_provider.go +++ /dev/null @@ -1,132 +0,0 @@ -package acme - -import ( - "crypto/tls" - "fmt" - "strings" - "sync" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/cluster" - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/safe" - "github.com/go-acme/lego/challenge" - "github.com/go-acme/lego/challenge/tlsalpn01" -) - -var _ challenge.ProviderTimeout = (*challengeTLSProvider)(nil) - -type challengeTLSProvider struct { - store cluster.Store - lock sync.RWMutex -} - -func (c *challengeTLSProvider) getCertificate(domain string) (cert *tls.Certificate, exists bool) { - log.Debugf("Looking for an existing ACME challenge for %s...", domain) - - if !strings.HasSuffix(domain, ".acme.invalid") { - return nil, false - } - - c.lock.RLock() - defer c.lock.RUnlock() - - account := c.store.Get().(*Account) - if account.ChallengeCerts == nil { - return nil, false - } - - err := account.Init() - if err != nil { - log.Errorf("Unable to init ACME Account: %v", err) - return nil, false - } - - var result *tls.Certificate - operation := func() error { - for _, cert := range account.ChallengeCerts { - for _, dns := range cert.certificate.Leaf.DNSNames { - if domain == dns { - result = cert.certificate - return nil - } - } - } - return fmt.Errorf("cannot find challenge cert for domain %s", domain) - } - - notify := func(err error, time time.Duration) { - log.Errorf("Error getting cert: %v, retrying in %s", err, time) - } 
- ebo := backoff.NewExponentialBackOff() - ebo.MaxElapsedTime = 60 * time.Second - - err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) - if err != nil { - log.Errorf("Error getting cert: %v", err) - return nil, false - - } - return result, true -} - -func (c *challengeTLSProvider) Present(domain, token, keyAuth string) error { - log.Debugf("Challenge Present %s", domain) - - cert, err := tlsALPN01ChallengeCert(domain, keyAuth) - if err != nil { - return err - } - - c.lock.Lock() - defer c.lock.Unlock() - - transaction, object, err := c.store.Begin() - if err != nil { - return err - } - - account := object.(*Account) - if account.ChallengeCerts == nil { - account.ChallengeCerts = map[string]*ChallengeCert{} - } - account.ChallengeCerts[domain] = cert - - return transaction.Commit(account) -} - -func (c *challengeTLSProvider) CleanUp(domain, token, keyAuth string) error { - log.Debugf("Challenge CleanUp %s", domain) - - c.lock.Lock() - defer c.lock.Unlock() - - transaction, object, err := c.store.Begin() - if err != nil { - return err - } - - account := object.(*Account) - delete(account.ChallengeCerts, domain) - - return transaction.Commit(account) -} - -func (c *challengeTLSProvider) Timeout() (timeout, interval time.Duration) { - return 60 * time.Second, 5 * time.Second -} - -func tlsALPN01ChallengeCert(domain, keyAuth string) (*ChallengeCert, error) { - tempCertPEM, rsaPrivPEM, err := tlsalpn01.ChallengeBlocks(domain, keyAuth) - if err != nil { - return nil, err - } - - certificate, err := tls.X509KeyPair(tempCertPEM, rsaPrivPEM) - if err != nil { - return nil, err - } - - return &ChallengeCert{Certificate: tempCertPEM, PrivateKey: rsaPrivPEM, certificate: &certificate}, nil -} diff --git a/old/acme/localStore.go b/old/acme/localStore.go deleted file mode 100644 index b8d34f229..000000000 --- a/old/acme/localStore.go +++ /dev/null @@ -1,177 +0,0 @@ -package acme - -import ( - "encoding/json" - "io/ioutil" - "os" - - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/provider/acme" -) - -// LocalStore is a store using a file as storage -type LocalStore struct { - file string -} - -// NewLocalStore create a LocalStore -func NewLocalStore(file string) *LocalStore { - return &LocalStore{ - file: file, - } -} - -// Get loads file into store and returns the Account -func (s *LocalStore) Get() (*Account, error) { - account := &Account{} - - hasData, err := acme.CheckFile(s.file) - if err != nil { - return nil, err - } - - if hasData { - f, err := os.Open(s.file) - if err != nil { - return nil, err - } - defer f.Close() - - file, err := ioutil.ReadAll(f) - if err != nil { - return nil, err - } - - if err := json.Unmarshal(file, &account); err != nil { - return nil, err - } - } - - return account, nil -} - -// ConvertToNewFormat converts old acme.json format to the new one and store the result into the file (used for the backward compatibility) -func ConvertToNewFormat(fileName string) { - localStore := acme.NewLocalStore(fileName) - - storeAccount, err := localStore.GetAccount() - if err != nil { - log.Errorf("Failed to read new account, ACME data conversion is not available : %v", err) - return - } - - storeCertificates, err := localStore.GetCertificates() - if err != nil { - log.Errorf("Failed to read new certificates, ACME data conversion is not available : %v", err) - return - } - - if storeAccount == nil { - localStore := NewLocalStore(fileName) - - account, err := localStore.Get() - if err != nil { - log.Errorf("Failed to read old 
account, ACME data conversion is not available : %v", err) - return - } - - // Convert ACME data from old to new format - newAccount := &acme.Account{} - if account != nil && len(account.Email) > 0 { - err = backupACMEFile(fileName, account) - if err != nil { - log.Errorf("Unable to create a backup for the V1 formatted ACME file: %v", err) - return - } - - err = account.RemoveAccountV1Values() - if err != nil { - log.Errorf("Unable to remove ACME Account V1 values during format conversion: %v", err) - return - } - - newAccount = &acme.Account{ - PrivateKey: account.PrivateKey, - Registration: account.Registration, - Email: account.Email, - KeyType: account.KeyType, - } - - var newCertificates []*acme.Certificate - for _, cert := range account.DomainsCertificate.Certs { - newCertificates = append(newCertificates, &acme.Certificate{ - Certificate: cert.Certificate.Certificate, - Key: cert.Certificate.PrivateKey, - Domain: cert.Domains, - }) - } - - // If account is in the old format, storeCertificates is nil or empty and has to be initialized - storeCertificates = newCertificates - } - - // Stores the data in new format into the file even if account is nil - // to delete Account in ACME v1 format and keeping the certificates - newLocalStore := acme.NewLocalStore(fileName) - newLocalStore.SaveDataChan <- &acme.StoredData{Account: newAccount, Certificates: storeCertificates} - } -} - -func backupACMEFile(originalFileName string, account interface{}) error { - // write account to file - data, err := json.MarshalIndent(account, "", " ") - if err != nil { - return err - } - return ioutil.WriteFile(originalFileName+".bak", data, 0600) -} - -// FromNewToOldFormat converts new acme account to the old one (used for the backward compatibility) -func FromNewToOldFormat(fileName string) (*Account, error) { - localStore := acme.NewLocalStore(fileName) - - storeAccount, err := localStore.GetAccount() - if err != nil { - return nil, err - } - - storeCertificates, err := localStore.GetCertificates() - if err != nil { - return nil, err - } - - // Convert ACME Account from new to old format - // (Needed by the KV stores) - var account *Account - if storeAccount != nil { - account = &Account{ - Email: storeAccount.Email, - PrivateKey: storeAccount.PrivateKey, - Registration: storeAccount.Registration, - DomainsCertificate: DomainsCertificates{}, - KeyType: storeAccount.KeyType, - } - } - - // Convert ACME Certificates from new to old format - // (Needed by the KV stores) - if len(storeCertificates) > 0 { - // Account can be nil if data are migrated from new format - // with a ACME V1 Account - if account == nil { - account = &Account{} - } - for _, cert := range storeCertificates { - _, err := account.DomainsCertificate.addCertificateForDomains(&Certificate{ - Domain: cert.Domain.Main, - Certificate: cert.Certificate, - PrivateKey: cert.Key, - }, cert.Domain) - if err != nil { - return nil, err - } - } - } - - return account, nil -} diff --git a/old/acme/localStore_test.go b/old/acme/localStore_test.go deleted file mode 100644 index 8070cf31c..000000000 --- a/old/acme/localStore_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package acme - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGet(t *testing.T) { - acmeFile := "./acme_example.json" - - folder, prefix := filepath.Split(acmeFile) - tmpFile, err := ioutil.TempFile(folder, prefix) - defer os.Remove(tmpFile.Name()) - - assert.NoError(t, err) - - fileContent, err := ioutil.ReadFile(acmeFile) - 
assert.NoError(t, err) - - _, err = tmpFile.Write(fileContent) - assert.NoError(t, err) - - localStore := NewLocalStore(tmpFile.Name()) - account, err := localStore.Get() - assert.NoError(t, err) - - assert.Len(t, account.DomainsCertificate.Certs, 1) -} diff --git a/old/api/dashboard.go b/old/api/dashboard.go deleted file mode 100644 index b2489a58a..000000000 --- a/old/api/dashboard.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/containous/mux" - "github.com/containous/traefik/old/log" - assetfs "github.com/elazarl/go-bindata-assetfs" -) - -// DashboardHandler expose dashboard routes -type DashboardHandler struct { - Assets *assetfs.AssetFS -} - -// AddRoutes add dashboard routes on a router -func (g DashboardHandler) AddRoutes(router *mux.Router) { - if g.Assets == nil { - log.Error("No assets for dashboard") - return - } - - // Expose dashboard - router.Methods(http.MethodGet). - Path("/"). - HandlerFunc(func(response http.ResponseWriter, request *http.Request) { - http.Redirect(response, request, request.Header.Get("X-Forwarded-Prefix")+"/dashboard/", 302) - }) - - router.Methods(http.MethodGet). - Path("/dashboard/status"). - HandlerFunc(func(response http.ResponseWriter, request *http.Request) { - http.Redirect(response, request, "/dashboard/", 302) - }) - - router.Methods(http.MethodGet). - PathPrefix("/dashboard/"). - Handler(http.StripPrefix("/dashboard/", http.FileServer(g.Assets))) -} diff --git a/old/api/debug.go b/old/api/debug.go deleted file mode 100644 index 785a61988..000000000 --- a/old/api/debug.go +++ /dev/null @@ -1,48 +0,0 @@ -package api - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - "runtime" - - "github.com/containous/mux" -) - -func init() { - expvar.Publish("Goroutines", expvar.Func(goroutines)) -} - -func goroutines() interface{} { - return runtime.NumGoroutine() -} - -// DebugHandler expose debug routes -type DebugHandler struct{} - -// AddRoutes add debug routes on a router -func (g DebugHandler) AddRoutes(router *mux.Router) { - router.Methods(http.MethodGet).Path("/debug/vars"). 
- HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprint(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprint(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprint(w, "\n}\n") - }) - - runtime.SetBlockProfileRate(1) - runtime.SetMutexProfileFraction(5) - router.Methods(http.MethodGet).PathPrefix("/debug/pprof/cmdline").HandlerFunc(pprof.Cmdline) - router.Methods(http.MethodGet).PathPrefix("/debug/pprof/profile").HandlerFunc(pprof.Profile) - router.Methods(http.MethodGet).PathPrefix("/debug/pprof/symbol").HandlerFunc(pprof.Symbol) - router.Methods(http.MethodGet).PathPrefix("/debug/pprof/trace").HandlerFunc(pprof.Trace) - router.Methods(http.MethodGet).PathPrefix("/debug/pprof/").HandlerFunc(pprof.Index) -} diff --git a/old/api/handler.go b/old/api/handler.go deleted file mode 100644 index 223258962..000000000 --- a/old/api/handler.go +++ /dev/null @@ -1,252 +0,0 @@ -package api - -import ( - "net/http" - - "github.com/containous/mux" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" - "github.com/containous/traefik/pkg/version" - assetfs "github.com/elazarl/go-bindata-assetfs" - thoas_stats "github.com/thoas/stats" - "github.com/unrolled/render" -) - -// Handler expose api routes -type Handler struct { - EntryPoint string `description:"EntryPoint" export:"true"` - Dashboard bool `description:"Activate dashboard" export:"true"` - Debug bool `export:"true"` - CurrentConfigurations *safe.Safe - Statistics *types.Statistics `description:"Enable more detailed statistics" export:"true"` - Stats *thoas_stats.Stats `json:"-"` - StatsRecorder *middlewares.StatsRecorder `json:"-"` - DashboardAssets *assetfs.AssetFS `json:"-"` -} - -var ( - templatesRenderer = render.New(render.Options{ - Directory: "nowhere", - }) -) - -// AddRoutes add api routes on a router -func (p Handler) AddRoutes(router *mux.Router) { - if p.Debug { - DebugHandler{}.AddRoutes(router) - } - - router.Methods(http.MethodGet).Path("/api").HandlerFunc(p.getConfigHandler) - router.Methods(http.MethodGet).Path("/api/providers").HandlerFunc(p.getConfigHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}").HandlerFunc(p.getProviderHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends").HandlerFunc(p.getBackendsHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}").HandlerFunc(p.getBackendHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers").HandlerFunc(p.getServersHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/backends/{backend}/servers/{server}").HandlerFunc(p.getServerHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends").HandlerFunc(p.getFrontendsHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}").HandlerFunc(p.getFrontendHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes").HandlerFunc(p.getRoutesHandler) - router.Methods(http.MethodGet).Path("/api/providers/{provider}/frontends/{frontend}/routes/{route}").HandlerFunc(p.getRouteHandler) - - // health route - router.Methods(http.MethodGet).Path("/health").HandlerFunc(p.getHealthHandler) - - 
version.Handler{}.Append(router) - - if p.Dashboard { - DashboardHandler{Assets: p.DashboardAssets}.AddRoutes(router) - } -} - -func getProviderIDFromVars(vars map[string]string) string { - providerID := vars["provider"] - // TODO: Deprecated - if providerID == "rest" { - providerID = "web" - } - return providerID -} - -func (p Handler) getConfigHandler(response http.ResponseWriter, request *http.Request) { - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - err := templatesRenderer.JSON(response, http.StatusOK, currentConfigurations) - if err != nil { - log.Error(err) - } -} - -func (p Handler) getProviderHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) - } -} - -func (p Handler) getBackendsHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider.Backends) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) - } -} - -func (p Handler) getBackendHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if backend, ok := provider.Backends[backendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, backend) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getServersHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if backend, ok := provider.Backends[backendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, backend.Servers) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getServerHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - backendID := vars["backend"] - serverID := vars["server"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if backend, ok := provider.Backends[backendID]; ok { - if server, ok := backend.Servers[serverID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, server) - if err != nil { - log.Error(err) - } - return - } - } - } - http.NotFound(response, request) -} - -func (p Handler) getFrontendsHandler(response http.ResponseWriter, request *http.Request) { - providerID := getProviderIDFromVars(mux.Vars(request)) - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := 
currentConfigurations[providerID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, provider.Frontends) - if err != nil { - log.Error(err) - } - } else { - http.NotFound(response, request) - } -} - -func (p Handler) getFrontendHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, frontend) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getRoutesHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, frontend.Routes) - if err != nil { - log.Error(err) - } - return - } - } - http.NotFound(response, request) -} - -func (p Handler) getRouteHandler(response http.ResponseWriter, request *http.Request) { - vars := mux.Vars(request) - providerID := getProviderIDFromVars(vars) - frontendID := vars["frontend"] - routeID := vars["route"] - - currentConfigurations := p.CurrentConfigurations.Get().(types.Configurations) - if provider, ok := currentConfigurations[providerID]; ok { - if frontend, ok := provider.Frontends[frontendID]; ok { - if route, ok := frontend.Routes[routeID]; ok { - err := templatesRenderer.JSON(response, http.StatusOK, route) - if err != nil { - log.Error(err) - } - return - } - } - } - http.NotFound(response, request) -} - -// healthResponse combines data returned by thoas/stats with statistics (if -// they are enabled). 
-type healthResponse struct { - *thoas_stats.Data - *middlewares.Stats -} - -func (p *Handler) getHealthHandler(response http.ResponseWriter, request *http.Request) { - health := &healthResponse{Data: p.Stats.Data()} - if p.StatsRecorder != nil { - health.Stats = p.StatsRecorder.Data() - } - err := templatesRenderer.JSON(response, http.StatusOK, health) - if err != nil { - log.Error(err) - } -} diff --git a/old/cluster/datastore.go b/old/cluster/datastore.go deleted file mode 100644 index 0b574f301..000000000 --- a/old/cluster/datastore.go +++ /dev/null @@ -1,247 +0,0 @@ -package cluster - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "time" - - "github.com/abronan/valkeyrie/store" - "github.com/cenkalti/backoff" - "github.com/containous/staert" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/safe" - uuid "github.com/satori/go.uuid" -) - -// Metadata stores Object plus metadata -type Metadata struct { - object Object - Object []byte - Lock string -} - -// NewMetadata returns new Metadata -func NewMetadata(object Object) *Metadata { - return &Metadata{object: object} -} - -// Marshall marshalls object -func (m *Metadata) Marshall() error { - var err error - m.Object, err = json.Marshal(m.object) - return err -} - -func (m *Metadata) unmarshall() error { - if len(m.Object) == 0 { - return nil - } - return json.Unmarshal(m.Object, m.object) -} - -// Listener is called when Object has been changed in KV store -type Listener func(Object) error - -var _ Store = (*Datastore)(nil) - -// Datastore holds a struct synced in a KV store -type Datastore struct { - kv staert.KvSource - ctx context.Context - localLock *sync.RWMutex - meta *Metadata - lockKey string - listener Listener -} - -// NewDataStore creates a Datastore -func NewDataStore(ctx context.Context, kvSource staert.KvSource, object Object, listener Listener) (*Datastore, error) { - datastore := Datastore{ - kv: kvSource, - ctx: ctx, - meta: &Metadata{object: object}, - lockKey: kvSource.Prefix + "/lock", - localLock: &sync.RWMutex{}, - listener: listener, - } - err := datastore.watchChanges() - if err != nil { - return nil, err - } - return &datastore, nil -} - -func (d *Datastore) watchChanges() error { - stopCh := make(chan struct{}) - kvCh, err := d.kv.Watch(d.lockKey, stopCh, nil) - if err != nil { - return fmt.Errorf("error while watching key %s: %v", d.lockKey, err) - } - safe.Go(func() { - ctx, cancel := context.WithCancel(d.ctx) - operation := func() error { - for { - select { - case <-ctx.Done(): - stopCh <- struct{}{} - return nil - case _, ok := <-kvCh: - if !ok { - cancel() - return err - } - err = d.reload() - if err != nil { - return err - } - if d.listener != nil { - err := d.listener(d.meta.object) - if err != nil { - log.Errorf("Error calling datastore listener: %s", err) - } - } - } - } - } - notify := func(err error, time time.Duration) { - log.Errorf("Error in watch datastore: %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Error in watch datastore: %v", err) - } - }) - return nil -} - -func (d *Datastore) reload() error { - log.Debug("Datastore reload") - _, err := d.Load() - return err -} - -// Begin creates a transaction with the KV store. 
-func (d *Datastore) Begin() (Transaction, Object, error) { - id := uuid.NewV4().String() - log.Debugf("Transaction %s begins", id) - remoteLock, err := d.kv.NewLock(d.lockKey, &store.LockOptions{TTL: 20 * time.Second, Value: []byte(id)}) - if err != nil { - return nil, nil, err - } - stopCh := make(chan struct{}) - ctx, cancel := context.WithCancel(d.ctx) - var errLock error - go func() { - _, errLock = remoteLock.Lock(stopCh) - cancel() - }() - select { - case <-ctx.Done(): - if errLock != nil { - return nil, nil, errLock - } - case <-d.ctx.Done(): - stopCh <- struct{}{} - return nil, nil, d.ctx.Err() - } - - // we got the lock! Now make sure we are synced with KV store - operation := func() error { - meta := d.get() - if meta.Lock != id { - return fmt.Errorf("object lock value: expected %s, got %s", id, meta.Lock) - } - return nil - } - notify := func(err error, time time.Duration) { - log.Errorf("Datastore sync error: %v, retrying in %s", err, time) - err = d.reload() - if err != nil { - log.Errorf("Error reloading: %+v", err) - } - } - ebo := backoff.NewExponentialBackOff() - ebo.MaxElapsedTime = 60 * time.Second - err = backoff.RetryNotify(safe.OperationWithRecover(operation), ebo, notify) - if err != nil { - return nil, nil, fmt.Errorf("datastore cannot sync: %v", err) - } - - // we synced with KV store, we can now return Setter - return &datastoreTransaction{ - Datastore: d, - remoteLock: remoteLock, - id: id, - }, d.meta.object, nil -} - -func (d *Datastore) get() *Metadata { - d.localLock.RLock() - defer d.localLock.RUnlock() - return d.meta -} - -// Load load atomically a struct from the KV store -func (d *Datastore) Load() (Object, error) { - d.localLock.Lock() - defer d.localLock.Unlock() - - // clear Object first, as mapstructure's decoder doesn't have ZeroFields set to true for merging purposes - d.meta.Object = d.meta.Object[:0] - - err := d.kv.LoadConfig(d.meta) - if err != nil { - return nil, err - } - err = d.meta.unmarshall() - if err != nil { - return nil, err - } - return d.meta.object, nil -} - -// Get atomically a struct from the KV store -func (d *Datastore) Get() Object { - d.localLock.RLock() - defer d.localLock.RUnlock() - return d.meta.object -} - -var _ Transaction = (*datastoreTransaction)(nil) - -type datastoreTransaction struct { - *Datastore - remoteLock store.Locker - dirty bool - id string -} - -// Commit allows to set an object in the KV store -func (s *datastoreTransaction) Commit(object Object) error { - s.localLock.Lock() - defer s.localLock.Unlock() - if s.dirty { - return fmt.Errorf("transaction already used, please begin a new one") - } - s.Datastore.meta.object = object - err := s.Datastore.meta.Marshall() - if err != nil { - return fmt.Errorf("marshall error: %s", err) - } - err = s.kv.StoreConfig(s.Datastore.meta) - if err != nil { - return fmt.Errorf("storeConfig error: %s", err) - } - - err = s.remoteLock.Unlock() - if err != nil { - return fmt.Errorf("unlock error: %s", err) - } - - s.dirty = true - log.Debugf("Transaction committed %s", s.id) - return nil -} diff --git a/old/cluster/leadership.go b/old/cluster/leadership.go deleted file mode 100644 index 68f6013aa..000000000 --- a/old/cluster/leadership.go +++ /dev/null @@ -1,146 +0,0 @@ -package cluster - -import ( - "context" - "net/http" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/mux" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/safe" - "github.com/docker/leadership" - 
"github.com/unrolled/render" -) - -const clusterLeaderKeySuffix = "/leader" - -var templatesRenderer = render.New(render.Options{ - Directory: "nowhere", -}) - -// Leadership allows leadership election using a KV store -type Leadership struct { - *safe.Pool - *types.Cluster - candidate *leadership.Candidate - leader *safe.Safe - listeners []LeaderListener -} - -// NewLeadership creates a leadership -func NewLeadership(ctx context.Context, cluster *types.Cluster) *Leadership { - return &Leadership{ - Pool: safe.NewPool(ctx), - Cluster: cluster, - candidate: leadership.NewCandidate(cluster.Store, cluster.Store.Prefix+clusterLeaderKeySuffix, cluster.Node, 20*time.Second), - listeners: []LeaderListener{}, - leader: safe.New(false), - } -} - -// LeaderListener is called when leadership has changed -type LeaderListener func(elected bool) error - -// Participate tries to be a leader -func (l *Leadership) Participate(pool *safe.Pool) { - pool.GoCtx(func(ctx context.Context) { - log.Debugf("Node %s running for election", l.Cluster.Node) - defer log.Debugf("Node %s no more running for election", l.Cluster.Node) - backOff := backoff.NewExponentialBackOff() - operation := func() error { - return l.run(ctx, l.candidate) - } - - notify := func(err error, time time.Duration) { - log.Errorf("Leadership election error %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), backOff, notify) - if err != nil { - log.Errorf("Cannot elect leadership %+v", err) - } - }) -} - -// AddListener adds a leadership listener -func (l *Leadership) AddListener(listener LeaderListener) { - l.listeners = append(l.listeners, listener) -} - -// Resign resigns from being a leader -func (l *Leadership) Resign() { - l.candidate.Resign() - log.Infof("Node %s resigned", l.Cluster.Node) -} - -func (l *Leadership) run(ctx context.Context, candidate *leadership.Candidate) error { - electedCh, errCh := candidate.RunForElection() - for { - select { - case elected := <-electedCh: - l.onElection(elected) - case err := <-errCh: - return err - case <-ctx.Done(): - l.candidate.Resign() - return nil - } - } -} - -func (l *Leadership) onElection(elected bool) { - if elected { - log.Infof("Node %s elected leader ♚", l.Cluster.Node) - l.leader.Set(true) - l.Start() - } else { - log.Infof("Node %s elected worker ♝", l.Cluster.Node) - l.leader.Set(false) - l.Stop() - } - for _, listener := range l.listeners { - err := listener(elected) - if err != nil { - log.Errorf("Error calling Leadership listener: %s", err) - } - } -} - -type leaderResponse struct { - Leader bool `json:"leader"` - LeaderNode string `json:"leader_node"` -} - -func (l *Leadership) getLeaderHandler(response http.ResponseWriter, request *http.Request) { - leaderNode := "" - leaderKv, err := l.Cluster.Store.Get(l.Cluster.Store.Prefix+clusterLeaderKeySuffix, nil) - if err != nil { - log.Error(err) - } else { - leaderNode = string(leaderKv.Value) - } - leader := &leaderResponse{Leader: l.IsLeader(), LeaderNode: leaderNode} - - status := http.StatusOK - if !leader.Leader { - // Set status to be `429`, as this will typically cause load balancers to stop sending requests to the instance without removing them from rotation. 
- status = http.StatusTooManyRequests - } - - err = templatesRenderer.JSON(response, status, leader) - if err != nil { - log.Error(err) - } -} - -// IsLeader returns true if current node is leader -func (l *Leadership) IsLeader() bool { - return l.leader.Get().(bool) -} - -// AddRoutes add dashboard routes on a router -func (l *Leadership) AddRoutes(router *mux.Router) { - // Expose cluster leader - router.Methods(http.MethodGet).Path("/api/cluster/leader").HandlerFunc(l.getLeaderHandler) -} diff --git a/old/cluster/store.go b/old/cluster/store.go deleted file mode 100644 index c8e9207be..000000000 --- a/old/cluster/store.go +++ /dev/null @@ -1,16 +0,0 @@ -package cluster - -// Object is the struct to store -type Object interface{} - -// Store is a generic interface to represents a storage -type Store interface { - Load() (Object, error) - Get() Object - Begin() (Transaction, Object, error) -} - -// Transaction allows to set a struct in the KV store -type Transaction interface { - Commit(object Object) error -} diff --git a/old/configuration/configuration.go b/old/configuration/configuration.go deleted file mode 100644 index 72e63fadf..000000000 --- a/old/configuration/configuration.go +++ /dev/null @@ -1,444 +0,0 @@ -package configuration - -import ( - "fmt" - "strings" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/acme" - "github.com/containous/traefik/old/api" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/middlewares/tracing/datadog" - "github.com/containous/traefik/old/middlewares/tracing/jaeger" - "github.com/containous/traefik/old/middlewares/tracing/zipkin" - "github.com/containous/traefik/old/ping" - "github.com/containous/traefik/old/provider/boltdb" - "github.com/containous/traefik/old/provider/consul" - "github.com/containous/traefik/old/provider/consulcatalog" - "github.com/containous/traefik/old/provider/dynamodb" - "github.com/containous/traefik/old/provider/ecs" - "github.com/containous/traefik/old/provider/etcd" - "github.com/containous/traefik/old/provider/eureka" - "github.com/containous/traefik/old/provider/mesos" - "github.com/containous/traefik/old/provider/rancher" - "github.com/containous/traefik/old/provider/rest" - "github.com/containous/traefik/old/provider/zk" - "github.com/containous/traefik/old/tls" - "github.com/containous/traefik/old/types" - acmeprovider "github.com/containous/traefik/pkg/provider/acme" - "github.com/containous/traefik/pkg/provider/docker" - "github.com/containous/traefik/pkg/provider/file" - "github.com/containous/traefik/pkg/provider/kubernetes/ingress" - newtypes "github.com/containous/traefik/pkg/types" - "github.com/go-acme/lego/challenge/dns01" - "github.com/pkg/errors" -) - -const ( - // DefaultInternalEntryPointName the name of the default internal entry point - DefaultInternalEntryPointName = "traefik" - - // DefaultHealthCheckInterval is the default health check interval. - DefaultHealthCheckInterval = 30 * time.Second - - // DefaultHealthCheckTimeout is the default health check request timeout. - DefaultHealthCheckTimeout = 5 * time.Second - - // DefaultDialTimeout when connecting to a backend server. - DefaultDialTimeout = 30 * time.Second - - // DefaultIdleTimeout before closing an idle connection. - DefaultIdleTimeout = 180 * time.Second - - // DefaultGraceTimeout controls how long Traefik serves pending requests - // prior to shutting down. 
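For reference, a minimal in-memory sketch of the Store and Transaction contract removed above (Load, Get, Begin, single-use Commit); memoryStore and memoryTx are invented names, and there is no KV backend or distributed lock behind them.

package main

import (
	"fmt"
	"sync"
)

// Object stands in for the removed cluster.Object (an empty interface).
type Object interface{}

// memoryStore is an invented, in-memory stand-in for the removed Store:
// no KV backend, just a mutex guarding the current object.
type memoryStore struct {
	mu  sync.Mutex
	obj Object
}

// memoryTx is an invented, single-use stand-in for the removed Transaction.
type memoryTx struct {
	s    *memoryStore
	used bool
}

func (s *memoryStore) Load() (Object, error) { return s.Get(), nil }

func (s *memoryStore) Get() Object {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.obj
}

// Begin returns a transaction plus the current object, mirroring the
// (Transaction, Object, error) shape of the removed interface.
func (s *memoryStore) Begin() (*memoryTx, Object, error) {
	return &memoryTx{s: s}, s.Get(), nil
}

// Commit atomically replaces the stored object and refuses reuse, like the
// "transaction already used" guard in the removed datastoreTransaction.
func (t *memoryTx) Commit(obj Object) error {
	if t.used {
		return fmt.Errorf("transaction already used, please begin a new one")
	}
	t.s.mu.Lock()
	t.s.obj = obj
	t.s.mu.Unlock()
	t.used = true
	return nil
}

func main() {
	s := &memoryStore{obj: "v1"}
	tx, current, _ := s.Begin()
	fmt.Println(current) // v1
	_ = tx.Commit("v2")
	fmt.Println(s.Get()) // v2
}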
- DefaultGraceTimeout = 10 * time.Second - - // DefaultAcmeCAServer is the default ACME API endpoint - DefaultAcmeCAServer = "https://acme-v02.api.letsencrypt.org/directory" -) - -// GlobalConfiguration holds global configuration (with providers, etc.). -// It's populated from the traefik configuration file passed as an argument to the binary. -type GlobalConfiguration struct { - LifeCycle *LifeCycle `description:"Timeouts influencing the server life cycle" export:"true"` - Debug bool `short:"d" description:"Enable debug mode" export:"true"` - CheckNewVersion bool `description:"Periodically check if a new version has been released" export:"true"` - SendAnonymousUsage bool `description:"send periodically anonymous usage statistics" export:"true"` - AccessLog *types.AccessLog `description:"Access log settings" export:"true"` - TraefikLog *types.TraefikLog `description:"Traefik log settings" export:"true"` - Tracing *tracing.Tracing `description:"OpenTracing configuration" export:"true"` - LogLevel string `short:"l" description:"Log level" export:"true"` - EntryPoints EntryPoints `description:"Entrypoints definition using format: --entryPoints='Name:http Address::8000 Redirect.EntryPoint:https' --entryPoints='Name:https Address::4442 TLS:tests/traefik.crt,tests/traefik.key;prod/traefik.crt,prod/traefik.key'" export:"true"` - Cluster *types.Cluster - Constraints types.Constraints `description:"Filter services by constraint, matching with service tags" export:"true"` - ACME *acme.ACME `description:"Enable ACME (Let's Encrypt): automatic SSL" export:"true"` - DefaultEntryPoints DefaultEntryPoints `description:"Entrypoints to be used by frontends that do not specify any entrypoint" export:"true"` - ProvidersThrottleDuration parse.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time." export:"true"` - MaxIdleConnsPerHost int `description:"If non-zero, controls the maximum idle (keep-alive) to keep per-host. If zero, DefaultMaxIdleConnsPerHost is used" export:"true"` - InsecureSkipVerify bool `description:"Disable SSL certificate verification" export:"true"` - RootCAs tls.FilesOrContents `description:"Add cert file for self-signed certificate"` - Retry *Retry `description:"Enable retry sending request if network error" export:"true"` - HealthCheck *HealthCheckConfig `description:"Health check parameters" export:"true"` - RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance" export:"true"` - ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers" export:"true"` - KeepTrailingSlash bool `description:"Do not remove trailing slash." 
export:"true"` // Deprecated - Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"` - File *file.Provider `description:"Enable File backend with default settings" export:"true"` - Consul *consul.Provider `description:"Enable Consul backend with default settings" export:"true"` - ConsulCatalog *consulcatalog.Provider `description:"Enable Consul catalog backend with default settings" export:"true"` - Etcd *etcd.Provider `description:"Enable Etcd backend with default settings" export:"true"` - Zookeeper *zk.Provider `description:"Enable Zookeeper backend with default settings" export:"true"` - Boltdb *boltdb.Provider `description:"Enable Boltdb backend with default settings" export:"true"` - Kubernetes *ingress.Provider `description:"Enable Kubernetes backend with default settings" export:"true"` - Mesos *mesos.Provider `description:"Enable Mesos backend with default settings" export:"true"` - Eureka *eureka.Provider `description:"Enable Eureka backend with default settings" export:"true"` - ECS *ecs.Provider `description:"Enable ECS backend with default settings" export:"true"` - Rancher *rancher.Provider `description:"Enable Rancher backend with default settings" export:"true"` - DynamoDB *dynamodb.Provider `description:"Enable DynamoDB backend with default settings" export:"true"` - Rest *rest.Provider `description:"Enable Rest backend with default settings" export:"true"` - API *api.Handler `description:"Enable api/dashboard" export:"true"` - Metrics *types.Metrics `description:"Enable a metrics exporter" export:"true"` - Ping *ping.Handler `description:"Enable ping" export:"true"` - HostResolver *HostResolverConfig `description:"Enable CNAME Flattening" export:"true"` -} - -// SetEffectiveConfiguration adds missing configuration parameters derived from existing ones. -// It also takes care of maintaining backwards compatibility. -func (gc *GlobalConfiguration) SetEffectiveConfiguration(configFile string) { - if len(gc.EntryPoints) == 0 { - gc.EntryPoints = map[string]*EntryPoint{"http": { - Address: ":80", - ForwardedHeaders: &ForwardedHeaders{}, - }} - gc.DefaultEntryPoints = []string{"http"} - } - - if (gc.API != nil && gc.API.EntryPoint == DefaultInternalEntryPointName) || - (gc.Ping != nil && gc.Ping.EntryPoint == DefaultInternalEntryPointName) || - (gc.Metrics != nil && gc.Metrics.Prometheus != nil && gc.Metrics.Prometheus.EntryPoint == DefaultInternalEntryPointName) || - (gc.Rest != nil && gc.Rest.EntryPoint == DefaultInternalEntryPointName) { - if _, ok := gc.EntryPoints[DefaultInternalEntryPointName]; !ok { - gc.EntryPoints[DefaultInternalEntryPointName] = &EntryPoint{Address: ":8080"} - } - } - - for entryPointName := range gc.EntryPoints { - entryPoint := gc.EntryPoints[entryPointName] - // ForwardedHeaders must be remove in the next breaking version - if entryPoint.ForwardedHeaders == nil { - entryPoint.ForwardedHeaders = &ForwardedHeaders{} - } - - if entryPoint.TLS != nil && entryPoint.TLS.DefaultCertificate == nil && len(entryPoint.TLS.Certificates) > 0 { - log.Infof("No tls.defaultCertificate given for %s: using the first item in tls.certificates as a fallback.", entryPointName) - entryPoint.TLS.DefaultCertificate = &entryPoint.TLS.Certificates[0] - } - } - - // Make sure LifeCycle isn't nil to spare nil checks elsewhere. 
- if gc.LifeCycle == nil { - gc.LifeCycle = &LifeCycle{} - } - - if gc.Rancher != nil { - // Ensure backwards compatibility for now - if len(gc.Rancher.AccessKey) > 0 || - len(gc.Rancher.Endpoint) > 0 || - len(gc.Rancher.SecretKey) > 0 { - - if gc.Rancher.API == nil { - gc.Rancher.API = &rancher.APIConfiguration{ - AccessKey: gc.Rancher.AccessKey, - SecretKey: gc.Rancher.SecretKey, - Endpoint: gc.Rancher.Endpoint, - } - } - log.Warn("Deprecated configuration found: rancher.[accesskey|secretkey|endpoint]. " + - "Please use rancher.api.[accesskey|secretkey|endpoint] instead.") - } - - if gc.Rancher.Metadata != nil && len(gc.Rancher.Metadata.Prefix) == 0 { - gc.Rancher.Metadata.Prefix = "latest" - } - } - - if gc.API != nil { - gc.API.Debug = gc.Debug - } - - if gc.File != nil { - gc.File.TraefikFile = configFile - } - - gc.initACMEProvider() - gc.initTracing() -} - -func (gc *GlobalConfiguration) initTracing() { - if gc.Tracing != nil { - switch gc.Tracing.Backend { - case jaeger.Name: - if gc.Tracing.Jaeger == nil { - gc.Tracing.Jaeger = &jaeger.Config{ - SamplingServerURL: "http://localhost:5778/sampling", - SamplingType: "const", - SamplingParam: 1.0, - LocalAgentHostPort: "127.0.0.1:6831", - Propagation: "jaeger", - Gen128Bit: false, - } - } - if gc.Tracing.Zipkin != nil { - log.Warn("Zipkin configuration will be ignored") - gc.Tracing.Zipkin = nil - } - if gc.Tracing.DataDog != nil { - log.Warn("DataDog configuration will be ignored") - gc.Tracing.DataDog = nil - } - case zipkin.Name: - if gc.Tracing.Zipkin == nil { - gc.Tracing.Zipkin = &zipkin.Config{ - HTTPEndpoint: "http://localhost:9411/api/v1/spans", - SameSpan: false, - ID128Bit: true, - Debug: false, - SampleRate: 1.0, - } - } - if gc.Tracing.Jaeger != nil { - log.Warn("Jaeger configuration will be ignored") - gc.Tracing.Jaeger = nil - } - if gc.Tracing.DataDog != nil { - log.Warn("DataDog configuration will be ignored") - gc.Tracing.DataDog = nil - } - case datadog.Name: - if gc.Tracing.DataDog == nil { - gc.Tracing.DataDog = &datadog.Config{ - LocalAgentHostPort: "localhost:8126", - GlobalTag: "", - Debug: false, - PrioritySampling: false, - } - } - if gc.Tracing.Zipkin != nil { - log.Warn("Zipkin configuration will be ignored") - gc.Tracing.Zipkin = nil - } - if gc.Tracing.Jaeger != nil { - log.Warn("Jaeger configuration will be ignored") - gc.Tracing.Jaeger = nil - } - default: - log.Warnf("Unknown tracer %q", gc.Tracing.Backend) - return - } - } -} - -func (gc *GlobalConfiguration) initACMEProvider() { - if gc.ACME != nil { - gc.ACME.CAServer = getSafeACMECAServer(gc.ACME.CAServer) - - if gc.ACME.DNSChallenge != nil && gc.ACME.HTTPChallenge != nil { - log.Warn("Unable to use DNS challenge and HTTP challenge at the same time. Fallback to DNS challenge.") - gc.ACME.HTTPChallenge = nil - } - - if gc.ACME.DNSChallenge != nil && gc.ACME.TLSChallenge != nil { - log.Warn("Unable to use DNS challenge and TLS challenge at the same time. Fallback to DNS challenge.") - gc.ACME.TLSChallenge = nil - } - - if gc.ACME.HTTPChallenge != nil && gc.ACME.TLSChallenge != nil { - log.Warn("Unable to use HTTP challenge and TLS challenge at the same time. 
Fallback to TLS challenge.") - gc.ACME.HTTPChallenge = nil - } - - if gc.ACME.OnDemand { - log.Warn("ACME.OnDemand is deprecated") - } - } -} - -// InitACMEProvider create an acme provider from the ACME part of globalConfiguration -func (gc *GlobalConfiguration) InitACMEProvider() (*acmeprovider.Provider, error) { - if gc.ACME != nil { - if len(gc.ACME.Storage) == 0 { - // Delete the ACME configuration to avoid starting ACME in cluster mode - gc.ACME = nil - return nil, errors.New("unable to initialize ACME provider with no storage location for the certificates") - } - // TODO: Remove when Provider ACME will replace totally ACME - // If provider file, use Provider ACME instead of ACME - if gc.Cluster == nil { - provider := &acmeprovider.Provider{} - provider.Configuration = convertACMEChallenge(gc.ACME) - - store := acmeprovider.NewLocalStore(provider.Storage) - provider.Store = store - acme.ConvertToNewFormat(provider.Storage) - gc.ACME = nil - return provider, nil - } - } - return nil, nil -} - -func getSafeACMECAServer(caServerSrc string) string { - if len(caServerSrc) == 0 { - return DefaultAcmeCAServer - } - - if strings.HasPrefix(caServerSrc, "https://acme-v01.api.letsencrypt.org") { - caServer := strings.Replace(caServerSrc, "v01", "v02", 1) - log.Warnf("The CA server %[1]q refers to a v01 endpoint of the ACME API, please change to %[2]q. Fallback to %[2]q.", caServerSrc, caServer) - return caServer - } - - if strings.HasPrefix(caServerSrc, "https://acme-staging.api.letsencrypt.org") { - caServer := strings.Replace(caServerSrc, "https://acme-staging.api.letsencrypt.org", "https://acme-staging-v02.api.letsencrypt.org", 1) - log.Warnf("The CA server %[1]q refers to a v01 endpoint of the ACME API, please change to %[2]q. Fallback to %[2]q.", caServerSrc, caServer) - return caServer - } - - return caServerSrc -} - -// ValidateConfiguration validate that configuration is coherent -func (gc *GlobalConfiguration) ValidateConfiguration() { - if gc.ACME != nil { - if _, ok := gc.EntryPoints[gc.ACME.EntryPoint]; !ok { - log.Fatalf("Unknown entrypoint %q for ACME configuration", gc.ACME.EntryPoint) - } else { - if gc.EntryPoints[gc.ACME.EntryPoint].TLS == nil { - log.Fatalf("Entrypoint %q has no TLS configuration for ACME configuration", gc.ACME.EntryPoint) - } - } - } -} - -// DefaultEntryPoints holds default entry points -type DefaultEntryPoints []string - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (dep *DefaultEntryPoints) String() string { - return strings.Join(*dep, ",") -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. 
-func (dep *DefaultEntryPoints) Set(value string) error { - entrypoints := strings.Split(value, ",") - if len(entrypoints) == 0 { - return fmt.Errorf("bad DefaultEntryPoints format: %s", value) - } - for _, entrypoint := range entrypoints { - *dep = append(*dep, entrypoint) - } - return nil -} - -// Get return the EntryPoints map -func (dep *DefaultEntryPoints) Get() interface{} { - return *dep -} - -// SetValue sets the EntryPoints map with val -func (dep *DefaultEntryPoints) SetValue(val interface{}) { - *dep = val.(DefaultEntryPoints) -} - -// Type is type of the struct -func (dep *DefaultEntryPoints) Type() string { - return "defaultentrypoints" -} - -// Retry contains request retry config -type Retry struct { - Attempts int `description:"Number of attempts" export:"true"` -} - -// HealthCheckConfig contains health check configuration parameters. -type HealthCheckConfig struct { - Interval parse.Duration `description:"Default periodicity of enabled health checks" export:"true"` - Timeout parse.Duration `description:"Default request timeout of enabled health checks" export:"true"` -} - -// RespondingTimeouts contains timeout configurations for incoming requests to the Traefik instance. -type RespondingTimeouts struct { - ReadTimeout parse.Duration `description:"ReadTimeout is the maximum duration for reading the entire request, including the body. If zero, no timeout is set" export:"true"` - WriteTimeout parse.Duration `description:"WriteTimeout is the maximum duration before timing out writes of the response. If zero, no timeout is set" export:"true"` - IdleTimeout parse.Duration `description:"IdleTimeout is the maximum amount duration an idle (keep-alive) connection will remain idle before closing itself. Defaults to 180 seconds. If zero, no timeout is set" export:"true"` -} - -// ForwardingTimeouts contains timeout configurations for forwarding requests to the backend servers. -type ForwardingTimeouts struct { - DialTimeout parse.Duration `description:"The amount of time to wait until a connection to a backend server can be established. Defaults to 30 seconds. If zero, no timeout exists" export:"true"` - ResponseHeaderTimeout parse.Duration `description:"The amount of time to wait for a server's response headers after fully writing the request (including its body, if any). If zero, no timeout exists" export:"true"` -} - -// LifeCycle contains configurations relevant to the lifecycle (such as the -// shutdown phase) of Traefik. 
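The DefaultEntryPoints Set method above follows the standard flag.Value pattern for comma-separated values; here is a hedged, self-contained sketch of that pattern, with an invented listFlag type and a throwaway FlagSet.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// listFlag is an invented type showing the flag.Value pattern behind the
// removed DefaultEntryPoints: a comma-separated value appends to a slice.
type listFlag []string

func (l *listFlag) String() string { return strings.Join(*l, ",") }

func (l *listFlag) Set(value string) error {
	if value == "" {
		return fmt.Errorf("bad list format: %q", value)
	}
	*l = append(*l, strings.Split(value, ",")...)
	return nil
}

func main() {
	var defaults listFlag
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&defaults, "defaultentrypoints", "Entrypoints used by frontends that do not specify any")
	_ = fs.Parse([]string{"-defaultentrypoints", "http,https"})
	fmt.Println(defaults) // [http https]
}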
-type LifeCycle struct { - RequestAcceptGraceTimeout parse.Duration `description:"Duration to keep accepting requests before Traefik initiates the graceful shutdown procedure"` - GraceTimeOut parse.Duration `description:"Duration to give active requests a chance to finish before Traefik stops"` -} - -// HostResolverConfig contain configuration for CNAME Flattening -type HostResolverConfig struct { - CnameFlattening bool `description:"A flag to enable/disable CNAME flattening" export:"true"` - ResolvConfig string `description:"resolv.conf used for DNS resolving" export:"true"` - ResolvDepth int `description:"The maximal depth of DNS recursive resolving" export:"true"` -} - -// Deprecated -func convertACMEChallenge(oldACMEChallenge *acme.ACME) *acmeprovider.Configuration { - conf := &acmeprovider.Configuration{ - KeyType: oldACMEChallenge.KeyType, - OnHostRule: oldACMEChallenge.OnHostRule, - // OnDemand: oldACMEChallenge.OnDemand, - Email: oldACMEChallenge.Email, - Storage: oldACMEChallenge.Storage, - ACMELogging: oldACMEChallenge.ACMELogging, - CAServer: oldACMEChallenge.CAServer, - EntryPoint: oldACMEChallenge.EntryPoint, - } - - for _, domain := range oldACMEChallenge.Domains { - if domain.Main != dns01.UnFqdn(domain.Main) { - log.Warnf("FQDN detected, please remove the trailing dot: %s", domain.Main) - } - for _, san := range domain.SANs { - if san != dns01.UnFqdn(san) { - log.Warnf("FQDN detected, please remove the trailing dot: %s", san) - } - } - conf.Domains = append(conf.Domains, newtypes.Domain(domain)) - } - if oldACMEChallenge.HTTPChallenge != nil { - conf.HTTPChallenge = &acmeprovider.HTTPChallenge{ - EntryPoint: oldACMEChallenge.HTTPChallenge.EntryPoint, - } - } - - if oldACMEChallenge.DNSChallenge != nil { - conf.DNSChallenge = &acmeprovider.DNSChallenge{ - Provider: oldACMEChallenge.DNSChallenge.Provider, - DelayBeforeCheck: oldACMEChallenge.DNSChallenge.DelayBeforeCheck, - } - } - - if oldACMEChallenge.TLSChallenge != nil { - conf.TLSChallenge = &acmeprovider.TLSChallenge{} - } - - return conf -} diff --git a/old/configuration/convert.go b/old/configuration/convert.go deleted file mode 100644 index a1d9cfb9c..000000000 --- a/old/configuration/convert.go +++ /dev/null @@ -1,218 +0,0 @@ -package configuration - -import ( - "github.com/containous/traefik/old/api" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/config/static" - "github.com/containous/traefik/pkg/ping" - "github.com/containous/traefik/pkg/tracing/datadog" - "github.com/containous/traefik/pkg/tracing/jaeger" - "github.com/containous/traefik/pkg/tracing/zipkin" - types2 "github.com/containous/traefik/pkg/types" -) - -// ConvertStaticConf FIXME sugar -// Deprecated -func ConvertStaticConf(globalConfiguration GlobalConfiguration) static.Configuration { - staticConfiguration := static.Configuration{} - - staticConfiguration.EntryPoints = make(static.EntryPoints) - - if globalConfiguration.EntryPoints != nil { - for name, ep := range globalConfiguration.EntryPoints { - staticConfiguration.EntryPoints[name] = &static.EntryPoint{ - Address: ep.Address, - } - } - } - - if globalConfiguration.Ping != nil { - old := globalConfiguration.Ping - staticConfiguration.Ping = &ping.Handler{ - EntryPoint: old.EntryPoint, - } - } - - staticConfiguration.API = convertAPI(globalConfiguration.API) - staticConfiguration.Metrics = ConvertMetrics(globalConfiguration.Metrics) - staticConfiguration.AccessLog = 
ConvertAccessLog(globalConfiguration.AccessLog) - staticConfiguration.Tracing = ConvertTracing(globalConfiguration.Tracing) - staticConfiguration.HostResolver = ConvertHostResolverConfig(globalConfiguration.HostResolver) - - return staticConfiguration -} - -// ConvertAccessLog FIXME sugar -// Deprecated -func ConvertAccessLog(old *types.AccessLog) *types2.AccessLog { - if old == nil { - return nil - } - - accessLog := &types2.AccessLog{ - FilePath: old.FilePath, - Format: old.Format, - BufferingSize: old.BufferingSize, - } - - if old.Filters != nil { - accessLog.Filters = &types2.AccessLogFilters{ - StatusCodes: types2.StatusCodes(old.Filters.StatusCodes), - RetryAttempts: old.Filters.RetryAttempts, - MinDuration: old.Filters.MinDuration, - } - } - - if old.Fields != nil { - accessLog.Fields = &types2.AccessLogFields{ - DefaultMode: old.Fields.DefaultMode, - Names: types2.FieldNames(old.Fields.Names), - } - - if old.Fields.Headers != nil { - accessLog.Fields.Headers = &types2.FieldHeaders{ - DefaultMode: old.Fields.Headers.DefaultMode, - Names: types2.FieldHeaderNames(old.Fields.Headers.Names), - } - } - } - - return accessLog -} - -// ConvertMetrics FIXME sugar -// Deprecated -func ConvertMetrics(old *types.Metrics) *types2.Metrics { - if old == nil { - return nil - } - - metrics := &types2.Metrics{} - - if old.Prometheus != nil { - metrics.Prometheus = &types2.Prometheus{ - EntryPoint: old.Prometheus.EntryPoint, - Buckets: types2.Buckets(old.Prometheus.Buckets), - } - } - - if old.Datadog != nil { - metrics.Datadog = &types2.Datadog{ - Address: old.Datadog.Address, - PushInterval: old.Datadog.PushInterval, - } - } - - if old.StatsD != nil { - metrics.StatsD = &types2.Statsd{ - Address: old.StatsD.Address, - PushInterval: old.StatsD.PushInterval, - } - } - if old.InfluxDB != nil { - metrics.InfluxDB = &types2.InfluxDB{ - Address: old.InfluxDB.Address, - Protocol: old.InfluxDB.Protocol, - PushInterval: old.InfluxDB.PushInterval, - Database: old.InfluxDB.Database, - RetentionPolicy: old.InfluxDB.RetentionPolicy, - Username: old.InfluxDB.Username, - Password: old.InfluxDB.Password, - } - } - - return metrics -} - -// ConvertTracing FIXME sugar -// Deprecated -func ConvertTracing(old *tracing.Tracing) *static.Tracing { - if old == nil { - return nil - } - - tra := &static.Tracing{ - Backend: old.Backend, - ServiceName: old.ServiceName, - SpanNameLimit: old.SpanNameLimit, - } - - if old.Jaeger != nil { - tra.Jaeger = &jaeger.Config{ - SamplingServerURL: old.Jaeger.SamplingServerURL, - SamplingType: old.Jaeger.SamplingType, - SamplingParam: old.Jaeger.SamplingParam, - LocalAgentHostPort: old.Jaeger.LocalAgentHostPort, - Gen128Bit: old.Jaeger.Gen128Bit, - Propagation: old.Jaeger.Propagation, - } - } - - if old.Zipkin != nil { - tra.Zipkin = &zipkin.Config{ - HTTPEndpoint: old.Zipkin.HTTPEndpoint, - SameSpan: old.Zipkin.SameSpan, - ID128Bit: old.Zipkin.ID128Bit, - Debug: old.Zipkin.Debug, - } - } - - if old.DataDog != nil { - tra.DataDog = &datadog.Config{ - LocalAgentHostPort: old.DataDog.LocalAgentHostPort, - GlobalTag: old.DataDog.GlobalTag, - Debug: old.DataDog.Debug, - } - } - - return tra -} - -func convertAPI(old *api.Handler) *static.API { - if old == nil { - return nil - } - - api := &static.API{ - EntryPoint: old.EntryPoint, - Dashboard: old.Dashboard, - DashboardAssets: old.DashboardAssets, - } - - if old.Statistics != nil { - api.Statistics = &types2.Statistics{ - RecentErrors: old.Statistics.RecentErrors, - } - } - - return api -} - -func convertConstraints(oldConstraints 
types.Constraints) types2.Constraints { - constraints := types2.Constraints{} - for _, value := range oldConstraints { - constraint := &types2.Constraint{ - Key: value.Key, - MustMatch: value.MustMatch, - Regex: value.Regex, - } - - constraints = append(constraints, constraint) - } - return constraints -} - -// ConvertHostResolverConfig FIXME -// Deprecated -func ConvertHostResolverConfig(oldconfig *HostResolverConfig) *types2.HostResolverConfig { - if oldconfig == nil { - return nil - } - - return &types2.HostResolverConfig{ - CnameFlattening: oldconfig.CnameFlattening, - ResolvConfig: oldconfig.ResolvConfig, - ResolvDepth: oldconfig.ResolvDepth, - } -} diff --git a/old/configuration/entrypoints.go b/old/configuration/entrypoints.go deleted file mode 100644 index b5dcedbc1..000000000 --- a/old/configuration/entrypoints.go +++ /dev/null @@ -1,322 +0,0 @@ -package configuration - -import ( - "fmt" - "strconv" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/tls" - "github.com/containous/traefik/old/types" -) - -// EntryPoint holds an entry point configuration of the reverse proxy (ip, port, TLS...) -type EntryPoint struct { - Address string - TLS *tls.TLS `export:"true"` - Redirect *types.Redirect `export:"true"` - Auth *types.Auth `export:"true"` - WhiteList *types.WhiteList `export:"true"` - Compress *Compress `export:"true"` - ProxyProtocol *ProxyProtocol `export:"true"` - ForwardedHeaders *ForwardedHeaders `export:"true"` - ClientIPStrategy *types.IPStrategy `export:"true"` -} - -// Compress contains compress configuration -type Compress struct{} - -// ProxyProtocol contains Proxy-Protocol configuration -type ProxyProtocol struct { - Insecure bool `export:"true"` - TrustedIPs []string -} - -// ForwardedHeaders Trust client forwarding headers -type ForwardedHeaders struct { - Insecure bool `export:"true"` - TrustedIPs []string -} - -// EntryPoints holds entry points configuration of the reverse proxy (ip, port, TLS...) -type EntryPoints map[string]*EntryPoint - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (ep EntryPoints) String() string { - return fmt.Sprintf("%+v", map[string]*EntryPoint(ep)) -} - -// Get return the EntryPoints map -func (ep *EntryPoints) Get() interface{} { - return *ep -} - -// SetValue sets the EntryPoints map with val -func (ep *EntryPoints) SetValue(val interface{}) { - *ep = val.(EntryPoints) -} - -// Type is type of the struct -func (ep *EntryPoints) Type() string { - return "entrypoints" -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. 
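The EntryPoints Set method that follows consumes a space-separated "Key:Value" syntax; as an illustration, a stdlib-only sketch with an invented parseEntryPointFlag helper that mirrors the dot-to-underscore key normalization and the bare TLS token handling of the removed parser.

package main

import (
	"fmt"
	"strings"
)

// parseEntryPointFlag mirrors the syntax handled by the removed parser:
// space-separated "Key:Value" fields, dots in keys turned into underscores,
// keys lower-cased, and a bare "TLS" token mapped to tls_acme.
func parseEntryPointFlag(raw string) map[string]string {
	conf := make(map[string]string)
	for _, field := range strings.Fields(raw) {
		parts := strings.SplitN(field, ":", 2)
		key := strings.ToLower(strings.ReplaceAll(parts[0], ".", "_"))
		switch {
		case len(parts) > 1:
			conf[key] = parts[1]
		case key == "tls":
			conf["tls_acme"] = "TLS"
		default:
			conf[key] = ""
		}
	}
	return conf
}

func main() {
	fmt.Println(parseEntryPointFlag("Name:https Address::4442 TLS ProxyProtocol.insecure:true"))
	// map[address::4442 name:https proxyprotocol_insecure:true tls_acme:TLS]
}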
-func (ep *EntryPoints) Set(value string) error { - result := parseEntryPointsConfiguration(value) - - var compress *Compress - if len(result["compress"]) > 0 { - compress = &Compress{} - } - - configTLS, err := makeEntryPointTLS(result) - if err != nil { - return err - } - - (*ep)[result["name"]] = &EntryPoint{ - Address: result["address"], - TLS: configTLS, - Auth: makeEntryPointAuth(result), - Redirect: makeEntryPointRedirect(result), - Compress: compress, - WhiteList: makeWhiteList(result), - ProxyProtocol: makeEntryPointProxyProtocol(result), - ForwardedHeaders: makeEntryPointForwardedHeaders(result), - ClientIPStrategy: makeIPStrategy("clientipstrategy", result), - } - - return nil -} - -func makeWhiteList(result map[string]string) *types.WhiteList { - if rawRange, ok := result["whitelist_sourcerange"]; ok { - return &types.WhiteList{ - SourceRange: strings.Split(rawRange, ","), - IPStrategy: makeIPStrategy("whitelist_ipstrategy", result), - } - } - return nil -} - -func makeIPStrategy(prefix string, result map[string]string) *types.IPStrategy { - depth := toInt(result, prefix+"_depth") - excludedIPs := result[prefix+"_excludedips"] - - if depth == 0 && len(excludedIPs) == 0 { - return nil - } - - return &types.IPStrategy{ - Depth: depth, - ExcludedIPs: strings.Split(excludedIPs, ","), - } -} - -func makeEntryPointAuth(result map[string]string) *types.Auth { - var basic *types.Basic - if v, ok := result["auth_basic_users"]; ok { - basic = &types.Basic{ - Realm: result["auth_basic_realm"], - Users: strings.Split(v, ","), - RemoveHeader: toBool(result, "auth_basic_removeheader"), - } - } - - var digest *types.Digest - if v, ok := result["auth_digest_users"]; ok { - digest = &types.Digest{ - Users: strings.Split(v, ","), - RemoveHeader: toBool(result, "auth_digest_removeheader"), - } - } - - var forward *types.Forward - if address, ok := result["auth_forward_address"]; ok { - var clientTLS *types.ClientTLS - - cert := result["auth_forward_tls_cert"] - key := result["auth_forward_tls_key"] - insecureSkipVerify := toBool(result, "auth_forward_tls_insecureskipverify") - - if len(cert) > 0 && len(key) > 0 || insecureSkipVerify { - clientTLS = &types.ClientTLS{ - CA: result["auth_forward_tls_ca"], - CAOptional: toBool(result, "auth_forward_tls_caoptional"), - Cert: cert, - Key: key, - InsecureSkipVerify: insecureSkipVerify, - } - } - - var authResponseHeaders []string - if v, ok := result["auth_forward_authresponseheaders"]; ok { - authResponseHeaders = strings.Split(v, ",") - } - - forward = &types.Forward{ - Address: address, - TLS: clientTLS, - TrustForwardHeader: toBool(result, "auth_forward_trustforwardheader"), - AuthResponseHeaders: authResponseHeaders, - } - } - - var auth *types.Auth - if basic != nil || digest != nil || forward != nil { - auth = &types.Auth{ - Basic: basic, - Digest: digest, - Forward: forward, - HeaderField: result["auth_headerfield"], - } - } - - return auth -} - -func makeEntryPointProxyProtocol(result map[string]string) *ProxyProtocol { - var proxyProtocol *ProxyProtocol - - ppTrustedIPs := result["proxyprotocol_trustedips"] - if len(result["proxyprotocol_insecure"]) > 0 || len(ppTrustedIPs) > 0 { - proxyProtocol = &ProxyProtocol{ - Insecure: toBool(result, "proxyprotocol_insecure"), - } - if len(ppTrustedIPs) > 0 { - proxyProtocol.TrustedIPs = strings.Split(ppTrustedIPs, ",") - } - } - - if proxyProtocol != nil && proxyProtocol.Insecure { - log.Warn("ProxyProtocol.insecure:true is dangerous. 
Please use 'ProxyProtocol.TrustedIPs:IPs' and remove 'ProxyProtocol.insecure:true'") - } - - return proxyProtocol -} - -func makeEntryPointForwardedHeaders(result map[string]string) *ForwardedHeaders { - forwardedHeaders := &ForwardedHeaders{} - if _, ok := result["forwardedheaders_insecure"]; ok { - forwardedHeaders.Insecure = toBool(result, "forwardedheaders_insecure") - } - - fhTrustedIPs := result["forwardedheaders_trustedips"] - if len(fhTrustedIPs) > 0 { - // TODO must be removed in the next breaking version. - forwardedHeaders.Insecure = toBool(result, "forwardedheaders_insecure") - forwardedHeaders.TrustedIPs = strings.Split(fhTrustedIPs, ",") - } - - return forwardedHeaders -} - -func makeEntryPointRedirect(result map[string]string) *types.Redirect { - var redirect *types.Redirect - - if len(result["redirect_entrypoint"]) > 0 || len(result["redirect_regex"]) > 0 || len(result["redirect_replacement"]) > 0 { - redirect = &types.Redirect{ - EntryPoint: result["redirect_entrypoint"], - Regex: result["redirect_regex"], - Replacement: result["redirect_replacement"], - Permanent: toBool(result, "redirect_permanent"), - } - } - - return redirect -} - -func makeEntryPointTLS(result map[string]string) (*tls.TLS, error) { - var configTLS *tls.TLS - - if len(result["tls"]) > 0 { - certs := tls.Certificates{} - if err := certs.Set(result["tls"]); err != nil { - return nil, err - } - configTLS = &tls.TLS{ - Certificates: certs, - } - } else if len(result["tls_acme"]) > 0 { - configTLS = &tls.TLS{ - Certificates: tls.Certificates{}, - } - } - - if configTLS != nil { - if len(result["ca"]) > 0 { - files := tls.FilesOrContents{} - files.Set(result["ca"]) - optional := toBool(result, "ca_optional") - configTLS.ClientCA = tls.ClientCA{ - Files: files, - Optional: optional, - } - } - - if len(result["tls_minversion"]) > 0 { - configTLS.MinVersion = result["tls_minversion"] - } - - if len(result["tls_ciphersuites"]) > 0 { - configTLS.CipherSuites = strings.Split(result["tls_ciphersuites"], ",") - } - - if len(result["tls_snistrict"]) > 0 { - configTLS.SniStrict = toBool(result, "tls_snistrict") - } - - if len(result["tls_defaultcertificate_cert"]) > 0 && len(result["tls_defaultcertificate_key"]) > 0 { - configTLS.DefaultCertificate = &tls.Certificate{ - CertFile: tls.FileOrContent(result["tls_defaultcertificate_cert"]), - KeyFile: tls.FileOrContent(result["tls_defaultcertificate_key"]), - } - } - } - - return configTLS, nil -} - -func parseEntryPointsConfiguration(raw string) map[string]string { - sections := strings.Fields(raw) - - config := make(map[string]string) - for _, part := range sections { - field := strings.SplitN(part, ":", 2) - name := strings.ToLower(strings.Replace(field[0], ".", "_", -1)) - if len(field) > 1 { - config[name] = field[1] - } else { - if strings.EqualFold(name, "TLS") { - config["tls_acme"] = "TLS" - } else { - config[name] = "" - } - } - } - return config -} - -func toBool(conf map[string]string, key string) bool { - if val, ok := conf[key]; ok { - return strings.EqualFold(val, "true") || - strings.EqualFold(val, "enable") || - strings.EqualFold(val, "on") - } - return false -} - -func toInt(conf map[string]string, key string) int { - if val, ok := conf[key]; ok { - intVal, err := strconv.Atoi(val) - if err != nil { - return 0 - } - return intVal - } - return 0 -} diff --git a/old/configuration/router/internal_router.go b/old/configuration/router/internal_router.go deleted file mode 100644 index 4a4141eb2..000000000 --- a/old/configuration/router/internal_router.go +++ 
/dev/null @@ -1,116 +0,0 @@ -package router - -import ( - "github.com/containous/mux" - "github.com/containous/traefik/old/configuration" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares" - mauth "github.com/containous/traefik/old/middlewares/auth" - "github.com/containous/traefik/old/types" - "github.com/urfave/negroni" -) - -// NewInternalRouterAggregator Create a new internalRouterAggregator -func NewInternalRouterAggregator(globalConfiguration configuration.GlobalConfiguration, entryPointName string) *InternalRouterAggregator { - var serverMiddlewares []negroni.Handler - - if globalConfiguration.EntryPoints[entryPointName].WhiteList != nil { - ipStrategy := globalConfiguration.EntryPoints[entryPointName].ClientIPStrategy - if globalConfiguration.EntryPoints[entryPointName].WhiteList.IPStrategy != nil { - ipStrategy = globalConfiguration.EntryPoints[entryPointName].WhiteList.IPStrategy - } - - strategy, err := ipStrategy.Get() - if err != nil { - log.Fatalf("Error creating whitelist middleware: %s", err) - } - - ipWhitelistMiddleware, err := middlewares.NewIPWhiteLister(globalConfiguration.EntryPoints[entryPointName].WhiteList.SourceRange, strategy) - if err != nil { - log.Fatalf("Error creating whitelist middleware: %s", err) - } - if ipWhitelistMiddleware != nil { - serverMiddlewares = append(serverMiddlewares, ipWhitelistMiddleware) - } - } - - if globalConfiguration.EntryPoints[entryPointName].Auth != nil { - authMiddleware, err := mauth.NewAuthenticator(globalConfiguration.EntryPoints[entryPointName].Auth, nil) - if err != nil { - log.Fatalf("Error creating authenticator middleware: %s", err) - } - serverMiddlewares = append(serverMiddlewares, authMiddleware) - } - - router := InternalRouterAggregator{} - routerWithMiddleware := InternalRouterAggregator{} - - if globalConfiguration.Metrics != nil && globalConfiguration.Metrics.Prometheus != nil && globalConfiguration.Metrics.Prometheus.EntryPoint == entryPointName { - // routerWithMiddleware.AddRouter(metrics.PrometheusHandler{}) - } - - if globalConfiguration.Rest != nil && globalConfiguration.Rest.EntryPoint == entryPointName { - routerWithMiddleware.AddRouter(globalConfiguration.Rest) - } - - if globalConfiguration.API != nil && globalConfiguration.API.EntryPoint == entryPointName { - routerWithMiddleware.AddRouter(globalConfiguration.API) - } - - if globalConfiguration.Ping != nil && globalConfiguration.Ping.EntryPoint == entryPointName { - router.AddRouter(globalConfiguration.Ping) - } - - if globalConfiguration.ACME != nil && globalConfiguration.ACME.HTTPChallenge != nil && globalConfiguration.ACME.HTTPChallenge.EntryPoint == entryPointName { - router.AddRouter(globalConfiguration.ACME) - } - - router.AddRouter(&WithMiddleware{router: &routerWithMiddleware, routerMiddlewares: serverMiddlewares}) - - return &router -} - -// WithMiddleware router with internal middleware -type WithMiddleware struct { - router types.InternalRouter - routerMiddlewares []negroni.Handler -} - -// AddRoutes Add routes to the router -func (wm *WithMiddleware) AddRoutes(systemRouter *mux.Router) { - realRouter := systemRouter.PathPrefix("/").Subrouter() - - wm.router.AddRoutes(realRouter) - - if len(wm.routerMiddlewares) > 0 { - if err := realRouter.Walk(wrapRoute(wm.routerMiddlewares)); err != nil { - log.Error(err) - } - } -} - -// InternalRouterAggregator InternalRouter that aggregate other internalRouter -type InternalRouterAggregator struct { - internalRouters []types.InternalRouter -} - -// AddRouter add 
a router in the aggregator -func (r *InternalRouterAggregator) AddRouter(router types.InternalRouter) { - r.internalRouters = append(r.internalRouters, router) -} - -// AddRoutes Add routes to the router -func (r *InternalRouterAggregator) AddRoutes(systemRouter *mux.Router) { - for _, router := range r.internalRouters { - router.AddRoutes(systemRouter) - } -} - -// wrapRoute with middlewares -func wrapRoute(middlewares []negroni.Handler) func(*mux.Route, *mux.Router, []*mux.Route) error { - return func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - middles := append(middlewares, negroni.Wrap(route.GetHandler())) - route.Handler(negroni.New(middles...)) - return nil - } -} diff --git a/old/log/logger.go b/old/log/logger.go deleted file mode 100644 index 768352520..000000000 --- a/old/log/logger.go +++ /dev/null @@ -1,315 +0,0 @@ -package log - -import ( - "bufio" - "fmt" - "io" - "os" - "runtime" - - "github.com/sirupsen/logrus" -) - -// Logger allows overriding the logrus logger behavior -type Logger interface { - logrus.FieldLogger - WriterLevel(logrus.Level) *io.PipeWriter -} - -var ( - logger Logger - logFilePath string - logFile *os.File -) - -func init() { - logger = logrus.StandardLogger().WithFields(logrus.Fields{}) - logrus.SetOutput(os.Stdout) -} - -// Context sets the Context of the logger -func Context(context interface{}) *logrus.Entry { - return logger.WithField("context", context) -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - logrus.SetOutput(out) -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter logrus.Formatter) { - logrus.SetFormatter(formatter) -} - -// SetLevel sets the standard logger level. -func SetLevel(level logrus.Level) { - logrus.SetLevel(level) -} - -// SetLogger sets the logger. -func SetLogger(l Logger) { - logger = l -} - -// GetLevel returns the standard logger level. -func GetLevel() logrus.Level { - return logrus.GetLevel() -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook logrus.Hook) { - logrus.AddHook(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *logrus.Entry { - return logger.WithError(err) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *logrus.Entry { - return logger.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields logrus.Fields) *logrus.Entry { - return logger.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - logger.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - logger.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - logger.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. 
-func Warn(args ...interface{}) { - logger.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - logger.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - logger.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - logger.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - logger.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - logger.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - logger.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - logger.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - logger.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - logger.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - logger.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - logger.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - logger.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - logger.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - logger.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) -} - -// OpenFile opens the log file using the specified path -func OpenFile(path string) error { - logFilePath = path - var err error - logFile, err = os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - - if err == nil { - SetOutput(logFile) - } - - return err -} - -// CloseFile closes the log and sets the Output to stdout -func CloseFile() error { - logrus.SetOutput(os.Stdout) - - if logFile != nil { - return logFile.Close() - } - return nil -} - -// RotateFile closes and reopens the log file to allow for rotation -// by an external source. If the log isn't backed by a file then -// it does nothing. 
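The rotate-by-reopen behaviour documented here can be sketched with the standard library alone; reopenLogFile below is an invented helper, not the removed implementation.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// reopenLogFile shows the sequence an external logrotate relies on: the old
// file is renamed away, the process reopens the same path, points its logger
// at the fresh handle, then releases the rotated-away one.
func reopenLogFile(logger *log.Logger, path string, current *os.File) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o666)
	if err != nil {
		return current, fmt.Errorf("error opening log file: %w", err)
	}
	logger.SetOutput(f)
	if current != nil {
		_ = current.Close() // release the rotated-away handle
	}
	return f, nil
}

func main() {
	logger := log.New(os.Stdout, "", log.LstdFlags)
	path := filepath.Join(os.TempDir(), "demo-traefik.log")
	f, err := reopenLogFile(logger, path, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	logger.Println("now writing to", path)
	_ = f.Close()
}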
-func RotateFile() error { - if logFile == nil && logFilePath == "" { - Debug("Traefik log is not writing to a file, ignoring rotate request") - return nil - } - - if logFile != nil { - defer func(f *os.File) { - f.Close() - }(logFile) - } - - if err := OpenFile(logFilePath); err != nil { - return fmt.Errorf("error opening log file: %s", err) - } - - return nil -} - -// Writer logs writer (Level Info) -func Writer() *io.PipeWriter { - return WriterLevel(logrus.InfoLevel) -} - -// WriterLevel logs writer for a specific level. -func WriterLevel(level logrus.Level) *io.PipeWriter { - return logger.WriterLevel(level) -} - -// CustomWriterLevel logs writer for a specific level. (with a custom scanner buffer size.) -// adapted from github.com/Sirupsen/logrus/writer.go -func CustomWriterLevel(level logrus.Level, maxScanTokenSize int) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - - switch level { - case logrus.DebugLevel: - printFunc = Debug - case logrus.InfoLevel: - printFunc = Info - case logrus.WarnLevel: - printFunc = Warn - case logrus.ErrorLevel: - printFunc = Error - case logrus.FatalLevel: - printFunc = Fatal - case logrus.PanicLevel: - printFunc = Panic - default: - printFunc = Print - } - - go writerScanner(reader, maxScanTokenSize, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -// extract from github.com/Sirupsen/logrus/writer.go -// Hack the buffer size -func writerScanner(reader io.ReadCloser, scanTokenSize int, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - - if scanTokenSize > bufio.MaxScanTokenSize { - buf := make([]byte, bufio.MaxScanTokenSize) - scanner.Buffer(buf, scanTokenSize) - } - - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/old/middlewares/accesslog/capture_request_reader.go b/old/middlewares/accesslog/capture_request_reader.go deleted file mode 100644 index 4fd0088b1..000000000 --- a/old/middlewares/accesslog/capture_request_reader.go +++ /dev/null @@ -1,18 +0,0 @@ -package accesslog - -import "io" - -type captureRequestReader struct { - source io.ReadCloser - count int64 -} - -func (r *captureRequestReader) Read(p []byte) (int, error) { - n, err := r.source.Read(p) - r.count += int64(n) - return n, err -} - -func (r *captureRequestReader) Close() error { - return r.source.Close() -} diff --git a/old/middlewares/accesslog/capture_response_writer.go b/old/middlewares/accesslog/capture_response_writer.go deleted file mode 100644 index 58fd368c4..000000000 --- a/old/middlewares/accesslog/capture_response_writer.go +++ /dev/null @@ -1,68 +0,0 @@ -package accesslog - -import ( - "bufio" - "fmt" - "net" - "net/http" - - "github.com/containous/traefik/old/middlewares" -) - -var ( - _ middlewares.Stateful = &captureResponseWriter{} -) - -// captureResponseWriter is a wrapper of type http.ResponseWriter -// that tracks request status and size -type captureResponseWriter struct { - rw http.ResponseWriter - status int - size int64 -} - -func (crw *captureResponseWriter) Header() http.Header { - return crw.rw.Header() -} - -func (crw *captureResponseWriter) Write(b []byte) (int, error) { - if crw.status == 0 { - crw.status = http.StatusOK - } - size, err := crw.rw.Write(b) - crw.size += int64(size) - return size, err -} - -func (crw *captureResponseWriter) 
WriteHeader(s int) { - crw.rw.WriteHeader(s) - crw.status = s -} - -func (crw *captureResponseWriter) Flush() { - if f, ok := crw.rw.(http.Flusher); ok { - f.Flush() - } -} - -func (crw *captureResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - if h, ok := crw.rw.(http.Hijacker); ok { - return h.Hijack() - } - return nil, nil, fmt.Errorf("not a hijacker: %T", crw.rw) -} - -func (crw *captureResponseWriter) CloseNotify() <-chan bool { - if c, ok := crw.rw.(http.CloseNotifier); ok { - return c.CloseNotify() - } - return nil -} - -func (crw *captureResponseWriter) Status() int { - return crw.status -} - -func (crw *captureResponseWriter) Size() int64 { - return crw.size -} diff --git a/old/middlewares/accesslog/logdata.go b/old/middlewares/accesslog/logdata.go deleted file mode 100644 index e20e753fe..000000000 --- a/old/middlewares/accesslog/logdata.go +++ /dev/null @@ -1,120 +0,0 @@ -package accesslog - -import ( - "net/http" -) - -const ( - // StartUTC is the map key used for the time at which request processing started. - StartUTC = "StartUTC" - // StartLocal is the map key used for the local time at which request processing started. - StartLocal = "StartLocal" - // Duration is the map key used for the total time taken by processing the response, including the origin server's time but - // not the log writing time. - Duration = "Duration" - // FrontendName is the map key used for the name of the Traefik frontend. - FrontendName = "FrontendName" - // BackendName is the map key used for the name of the Traefik backend. - BackendName = "BackendName" - // BackendURL is the map key used for the URL of the Traefik backend. - BackendURL = "BackendURL" - // BackendAddr is the map key used for the IP:port of the Traefik backend (extracted from BackendURL) - BackendAddr = "BackendAddr" - // ClientAddr is the map key used for the remote address in its original form (usually IP:port). - ClientAddr = "ClientAddr" - // ClientHost is the map key used for the remote IP address from which the client request was received. - ClientHost = "ClientHost" - // ClientPort is the map key used for the remote TCP port from which the client request was received. - ClientPort = "ClientPort" - // ClientUsername is the map key used for the username provided in the URL, if present. - ClientUsername = "ClientUsername" - // RequestAddr is the map key used for the HTTP Host header (usually IP:port). This is treated as not a header by the Go API. - RequestAddr = "RequestAddr" - // RequestHost is the map key used for the HTTP Host server name (not including port). - RequestHost = "RequestHost" - // RequestPort is the map key used for the TCP port from the HTTP Host. - RequestPort = "RequestPort" - // RequestMethod is the map key used for the HTTP method. - RequestMethod = "RequestMethod" - // RequestPath is the map key used for the HTTP request URI, not including the scheme, host or port. - RequestPath = "RequestPath" - // RequestProtocol is the map key used for the version of HTTP requested. - RequestProtocol = "RequestProtocol" - // RequestContentSize is the map key used for the number of bytes in the request entity (a.k.a. body) sent by the client. 
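The captureResponseWriter above can be reduced to a stdlib-only sketch; statusRecorder is an invented name recording just the status and body size that an access-log middleware needs.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusRecorder wraps an http.ResponseWriter and records what is written
// downstream so a logging middleware can report it afterwards.
type statusRecorder struct {
	http.ResponseWriter
	status int
	size   int64
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

func (r *statusRecorder) Write(b []byte) (int, error) {
	if r.status == 0 {
		r.status = http.StatusOK // implicit 200 when Write comes first
	}
	n, err := r.ResponseWriter.Write(b)
	r.size += int64(n)
	return n, err
}

func main() {
	rec := httptest.NewRecorder()
	w := &statusRecorder{ResponseWriter: rec}
	_, _ = w.Write([]byte("hello"))
	fmt.Println(w.status, w.size) // 200 5
}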
- RequestContentSize = "RequestContentSize" - // RequestRefererHeader is the Referer header in the request - RequestRefererHeader = "request_Referer" - // RequestUserAgentHeader is the User-Agent header in the request - RequestUserAgentHeader = "request_User-Agent" - // OriginDuration is the map key used for the time taken by the origin server ('upstream') to return its response. - OriginDuration = "OriginDuration" - // OriginContentSize is the map key used for the content length specified by the origin server, or 0 if unspecified. - OriginContentSize = "OriginContentSize" - // OriginStatus is the map key used for the HTTP status code returned by the origin server. - // If the request was handled by this Traefik instance (e.g. with a redirect), then this value will be absent. - OriginStatus = "OriginStatus" - // DownstreamStatus is the map key used for the HTTP status code returned to the client. - DownstreamStatus = "DownstreamStatus" - // DownstreamContentSize is the map key used for the number of bytes in the response entity returned to the client. - // This is in addition to the "Content-Length" header, which may be present in the origin response. - DownstreamContentSize = "DownstreamContentSize" - // RequestCount is the map key used for the number of requests received since the Traefik instance started. - RequestCount = "RequestCount" - // GzipRatio is the map key used for the response body compression ratio achieved. - GzipRatio = "GzipRatio" - // Overhead is the map key used for the processing time overhead caused by Traefik. - Overhead = "Overhead" - // RetryAttempts is the map key used for the amount of attempts the request was retried. - RetryAttempts = "RetryAttempts" -) - -// These are written out in the default case when no config is provided to specify keys of interest. -var defaultCoreKeys = [...]string{ - StartUTC, - Duration, - FrontendName, - BackendName, - BackendURL, - ClientHost, - ClientPort, - ClientUsername, - RequestHost, - RequestPort, - RequestMethod, - RequestPath, - RequestProtocol, - RequestContentSize, - OriginDuration, - OriginContentSize, - OriginStatus, - DownstreamStatus, - DownstreamContentSize, - RequestCount, -} - -// This contains the set of all keys, i.e. all the default keys plus all non-default keys. -var allCoreKeys = make(map[string]struct{}) - -func init() { - for _, k := range defaultCoreKeys { - allCoreKeys[k] = struct{}{} - } - allCoreKeys[BackendAddr] = struct{}{} - allCoreKeys[ClientAddr] = struct{}{} - allCoreKeys[RequestAddr] = struct{}{} - allCoreKeys[GzipRatio] = struct{}{} - allCoreKeys[StartLocal] = struct{}{} - allCoreKeys[Overhead] = struct{}{} - allCoreKeys[RetryAttempts] = struct{}{} -} - -// CoreLogData holds the fields computed from the request/response. -type CoreLogData map[string]interface{} - -// LogData is the data captured by the middleware so that it can be logged. 
-type LogData struct { - Core CoreLogData - Request http.Header - OriginResponse http.Header - DownstreamResponse http.Header -} diff --git a/old/middlewares/accesslog/logger.go b/old/middlewares/accesslog/logger.go deleted file mode 100644 index 9483e661b..000000000 --- a/old/middlewares/accesslog/logger.go +++ /dev/null @@ -1,334 +0,0 @@ -package accesslog - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/sirupsen/logrus" -) - -type key string - -const ( - // DataTableKey is the key within the request context used to - // store the Log Data Table - DataTableKey key = "LogDataTable" - - // CommonFormat is the common logging format (CLF) - CommonFormat string = "common" - - // JSONFormat is the JSON logging format - JSONFormat string = "json" -) - -type logHandlerParams struct { - logDataTable *LogData - crr *captureRequestReader - crw *captureResponseWriter -} - -// LogHandler will write each request and its response to the access log. -type LogHandler struct { - config *types.AccessLog - logger *logrus.Logger - file *os.File - mu sync.Mutex - httpCodeRanges types.HTTPCodeRanges - logHandlerChan chan logHandlerParams - wg sync.WaitGroup -} - -// NewLogHandler creates a new LogHandler -func NewLogHandler(config *types.AccessLog) (*LogHandler, error) { - file := os.Stdout - if len(config.FilePath) > 0 { - f, err := openAccessLogFile(config.FilePath) - if err != nil { - return nil, fmt.Errorf("error opening access log file: %s", err) - } - file = f - } - logHandlerChan := make(chan logHandlerParams, config.BufferingSize) - - var formatter logrus.Formatter - - switch config.Format { - case CommonFormat: - formatter = new(CommonLogFormatter) - case JSONFormat: - formatter = new(logrus.JSONFormatter) - default: - return nil, fmt.Errorf("unsupported access log format: %s", config.Format) - } - - logger := &logrus.Logger{ - Out: file, - Formatter: formatter, - Hooks: make(logrus.LevelHooks), - Level: logrus.InfoLevel, - } - - logHandler := &LogHandler{ - config: config, - logger: logger, - file: file, - logHandlerChan: logHandlerChan, - } - - if config.Filters != nil { - if httpCodeRanges, err := types.NewHTTPCodeRanges(config.Filters.StatusCodes); err != nil { - log.Errorf("Failed to create new HTTP code ranges: %s", err) - } else { - logHandler.httpCodeRanges = httpCodeRanges - } - } - - if config.BufferingSize > 0 { - logHandler.wg.Add(1) - go func() { - defer logHandler.wg.Done() - for handlerParams := range logHandler.logHandlerChan { - logHandler.logTheRoundTrip(handlerParams.logDataTable, handlerParams.crr, handlerParams.crw) - } - }() - } - - return logHandler, nil -} - -func openAccessLogFile(filePath string) (*os.File, error) { - dir := filepath.Dir(filePath) - - if err := os.MkdirAll(dir, 0755); err != nil { - return nil, fmt.Errorf("failed to create log path %s: %s", dir, err) - } - - file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return nil, fmt.Errorf("error opening file %s: %s", filePath, err) - } - - return file, nil -} - -// GetLogDataTable gets the request context object that contains logging data. -// This creates data as the request passes through the middleware chain. 
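GetLogDataTable, defined next, reads a per-request record stored under DataTableKey; as an illustration, a stdlib sketch of that store-then-enrich flow, with invented names (ctxKey, logRecord, withRecord, fromContext).

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// The outer middleware puts a mutable record in the request context, inner
// handlers add fields, and the line is emitted once the chain returns.
type ctxKey string

const recordKey ctxKey = "LogDataTable"

type logRecord struct {
	Core map[string]interface{}
}

func withRecord(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rec := &logRecord{Core: map[string]interface{}{"RequestMethod": r.Method}}
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), recordKey, rec)))
		fmt.Println(rec.Core) // stand-in for writing the access-log line
	})
}

func fromContext(r *http.Request) *logRecord {
	if rec, ok := r.Context().Value(recordKey).(*logRecord); ok {
		return rec
	}
	return &logRecord{Core: map[string]interface{}{}} // empty fallback, as in the removed GetLogDataTable
}

func main() {
	h := withRecord(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fromContext(r).Core["FrontendName"] = "demo"
		w.WriteHeader(http.StatusOK)
	}))
	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil))
}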
-func GetLogDataTable(req *http.Request) *LogData { - if ld, ok := req.Context().Value(DataTableKey).(*LogData); ok { - return ld - } - log.Errorf("%s is nil", DataTableKey) - return &LogData{Core: make(CoreLogData)} -} - -func (l *LogHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { - now := time.Now().UTC() - - core := CoreLogData{ - StartUTC: now, - StartLocal: now.Local(), - } - - logDataTable := &LogData{Core: core, Request: req.Header} - - reqWithDataTable := req.WithContext(context.WithValue(req.Context(), DataTableKey, logDataTable)) - - var crr *captureRequestReader - if req.Body != nil { - crr = &captureRequestReader{source: req.Body, count: 0} - reqWithDataTable.Body = crr - } - - core[RequestCount] = nextRequestCount() - if req.Host != "" { - core[RequestAddr] = req.Host - core[RequestHost], core[RequestPort] = silentSplitHostPort(req.Host) - } - // copy the URL without the scheme, hostname etc - urlCopy := &url.URL{ - Path: req.URL.Path, - RawPath: req.URL.RawPath, - RawQuery: req.URL.RawQuery, - ForceQuery: req.URL.ForceQuery, - Fragment: req.URL.Fragment, - } - urlCopyString := urlCopy.String() - core[RequestMethod] = req.Method - core[RequestPath] = urlCopyString - core[RequestProtocol] = req.Proto - - core[ClientAddr] = req.RemoteAddr - core[ClientHost], core[ClientPort] = silentSplitHostPort(req.RemoteAddr) - - if forwardedFor := req.Header.Get("X-Forwarded-For"); forwardedFor != "" { - core[ClientHost] = forwardedFor - } - - crw := &captureResponseWriter{rw: rw} - - next.ServeHTTP(crw, reqWithDataTable) - - core[ClientUsername] = formatUsernameForLog(core[ClientUsername]) - - logDataTable.DownstreamResponse = crw.Header() - - if l.config.BufferingSize > 0 { - l.logHandlerChan <- logHandlerParams{ - logDataTable: logDataTable, - crr: crr, - crw: crw, - } - } else { - l.logTheRoundTrip(logDataTable, crr, crw) - } -} - -// Close closes the Logger (i.e. the file, drain logHandlerChan, etc). -func (l *LogHandler) Close() error { - close(l.logHandlerChan) - l.wg.Wait() - return l.file.Close() -} - -// Rotate closes and reopens the log file to allow for rotation -// by an external source. -func (l *LogHandler) Rotate() error { - var err error - - if l.file != nil { - defer func(f *os.File) { - f.Close() - }(l.file) - } - - l.file, err = os.OpenFile(l.config.FilePath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0664) - if err != nil { - return err - } - l.mu.Lock() - defer l.mu.Unlock() - l.logger.Out = l.file - return nil -} - -func silentSplitHostPort(value string) (host string, port string) { - host, port, err := net.SplitHostPort(value) - if err != nil { - return value, "-" - } - return host, port -} - -func formatUsernameForLog(usernameField interface{}) string { - username, ok := usernameField.(string) - if ok && len(username) != 0 { - return username - } - return "-" -} - -// Logging handler to log frontend name, backend name, and elapsed time -func (l *LogHandler) logTheRoundTrip(logDataTable *LogData, crr *captureRequestReader, crw *captureResponseWriter) { - core := logDataTable.Core - - retryAttempts, ok := core[RetryAttempts].(int) - if !ok { - retryAttempts = 0 - } - core[RetryAttempts] = retryAttempts - - if crr != nil { - core[RequestContentSize] = crr.count - } - - core[DownstreamStatus] = crw.Status() - - // n.b. 
take care to perform time arithmetic using UTC to avoid errors at DST boundaries - totalDuration := time.Now().UTC().Sub(core[StartUTC].(time.Time)) - core[Duration] = totalDuration - - if l.keepAccessLog(crw.Status(), retryAttempts, totalDuration) { - core[DownstreamContentSize] = crw.Size() - if original, ok := core[OriginContentSize]; ok { - o64 := original.(int64) - if o64 != crw.Size() && 0 != crw.Size() { - core[GzipRatio] = float64(o64) / float64(crw.Size()) - } - } - - core[Overhead] = totalDuration - if origin, ok := core[OriginDuration]; ok { - core[Overhead] = totalDuration - origin.(time.Duration) - } - - fields := logrus.Fields{} - - for k, v := range logDataTable.Core { - if l.config.Fields.Keep(k) { - fields[k] = v - } - } - - l.redactHeaders(logDataTable.Request, fields, "request_") - l.redactHeaders(logDataTable.OriginResponse, fields, "origin_") - l.redactHeaders(logDataTable.DownstreamResponse, fields, "downstream_") - - l.mu.Lock() - defer l.mu.Unlock() - l.logger.WithFields(fields).Println() - } -} - -func (l *LogHandler) redactHeaders(headers http.Header, fields logrus.Fields, prefix string) { - for k := range headers { - v := l.config.Fields.KeepHeader(k) - if v == types.AccessLogKeep { - fields[prefix+k] = headers.Get(k) - } else if v == types.AccessLogRedact { - fields[prefix+k] = "REDACTED" - } - } -} - -func (l *LogHandler) keepAccessLog(statusCode, retryAttempts int, duration time.Duration) bool { - if l.config.Filters == nil { - // no filters were specified - return true - } - - if len(l.httpCodeRanges) == 0 && !l.config.Filters.RetryAttempts && l.config.Filters.MinDuration == 0 { - // empty filters were specified, e.g. by passing --accessLog.filters only (without other filter options) - return true - } - - if l.httpCodeRanges.Contains(statusCode) { - return true - } - - if l.config.Filters.RetryAttempts && retryAttempts > 0 { - return true - } - - if l.config.Filters.MinDuration > 0 && (parse.Duration(duration) > l.config.Filters.MinDuration) { - return true - } - - return false -} - -var requestCounter uint64 // Request ID - -func nextRequestCount() uint64 { - return atomic.AddUint64(&requestCounter, 1) -} diff --git a/old/middlewares/accesslog/logger_formatters.go b/old/middlewares/accesslog/logger_formatters.go deleted file mode 100644 index 4755079fe..000000000 --- a/old/middlewares/accesslog/logger_formatters.go +++ /dev/null @@ -1,82 +0,0 @@ -package accesslog - -import ( - "bytes" - "fmt" - "time" - - "github.com/sirupsen/logrus" -) - -// default format for time presentation -const ( - commonLogTimeFormat = "02/Jan/2006:15:04:05 -0700" - defaultValue = "-" -) - -// CommonLogFormatter provides formatting in the Traefik common log format -type CommonLogFormatter struct{} - -// Format formats the log entry in the Traefik common log format -func (f *CommonLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { - b := &bytes.Buffer{} - - var timestamp = defaultValue - if v, ok := entry.Data[StartUTC]; ok { - timestamp = v.(time.Time).Format(commonLogTimeFormat) - } - - var elapsedMillis int64 - if v, ok := entry.Data[Duration]; ok { - elapsedMillis = v.(time.Duration).Nanoseconds() / 1000000 - } - - _, err := fmt.Fprintf(b, "%s - %s [%s] \"%s %s %s\" %v %v %s %s %v %s %s %dms\n", - toLog(entry.Data, ClientHost, defaultValue, false), - toLog(entry.Data, ClientUsername, defaultValue, false), - timestamp, - toLog(entry.Data, RequestMethod, defaultValue, false), - toLog(entry.Data, RequestPath, defaultValue, false), - toLog(entry.Data, RequestProtocol, 
defaultValue, false), - toLog(entry.Data, OriginStatus, defaultValue, true), - toLog(entry.Data, OriginContentSize, defaultValue, true), - toLog(entry.Data, "request_Referer", `"-"`, true), - toLog(entry.Data, "request_User-Agent", `"-"`, true), - toLog(entry.Data, RequestCount, defaultValue, true), - toLog(entry.Data, FrontendName, defaultValue, true), - toLog(entry.Data, BackendURL, defaultValue, true), - elapsedMillis) - - return b.Bytes(), err -} - -func toLog(fields logrus.Fields, key string, defaultValue string, quoted bool) interface{} { - if v, ok := fields[key]; ok { - if v == nil { - return defaultValue - } - - switch s := v.(type) { - case string: - return toLogEntry(s, defaultValue, quoted) - - case fmt.Stringer: - return toLogEntry(s.String(), defaultValue, quoted) - - default: - return v - } - } - return defaultValue - -} -func toLogEntry(s string, defaultValue string, quote bool) string { - if len(s) == 0 { - return defaultValue - } - - if quote { - return `"` + s + `"` - } - return s -} diff --git a/old/middlewares/accesslog/logger_formatters_test.go b/old/middlewares/accesslog/logger_formatters_test.go deleted file mode 100644 index 22b68da58..000000000 --- a/old/middlewares/accesslog/logger_formatters_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package accesslog - -import ( - "net/http" - "testing" - "time" - - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestCommonLogFormatter_Format(t *testing.T) { - clf := CommonLogFormatter{} - - testCases := []struct { - name string - data map[string]interface{} - expectedLog string - }{ - { - name: "OriginStatus & OriginContentSize are nil", - data: map[string]interface{}{ - StartUTC: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - Duration: 123 * time.Second, - ClientHost: "10.0.0.1", - ClientUsername: "Client", - RequestMethod: http.MethodGet, - RequestPath: "/foo", - RequestProtocol: "http", - OriginStatus: nil, - OriginContentSize: nil, - RequestRefererHeader: "", - RequestUserAgentHeader: "", - RequestCount: 0, - FrontendName: "", - BackendURL: "", - }, - expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" - - "-" "-" 0 - - 123000ms -`, - }, - { - name: "all data", - data: map[string]interface{}{ - StartUTC: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), - Duration: 123 * time.Second, - ClientHost: "10.0.0.1", - ClientUsername: "Client", - RequestMethod: http.MethodGet, - RequestPath: "/foo", - RequestProtocol: "http", - OriginStatus: 123, - OriginContentSize: 132, - RequestRefererHeader: "referer", - RequestUserAgentHeader: "agent", - RequestCount: nil, - FrontendName: "foo", - BackendURL: "http://10.0.0.2/toto", - }, - expectedLog: `10.0.0.1 - Client [10/Nov/2009:23:00:00 +0000] "GET /foo http" 123 132 "referer" "agent" - "foo" "http://10.0.0.2/toto" 123000ms -`, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - entry := &logrus.Entry{Data: test.data} - - raw, err := clf.Format(entry) - assert.NoError(t, err) - - assert.Equal(t, test.expectedLog, string(raw)) - }) - } - -} - -func Test_toLog(t *testing.T) { - - testCases := []struct { - desc string - fields logrus.Fields - fieldName string - defaultValue string - quoted bool - expectedLog interface{} - }{ - { - desc: "Should return int 1", - fields: logrus.Fields{ - "Powpow": 1, - }, - fieldName: "Powpow", - defaultValue: defaultValue, - quoted: false, - expectedLog: 1, - }, - { - desc: "Should return string foo", - fields: 
logrus.Fields{ - "Powpow": "foo", - }, - fieldName: "Powpow", - defaultValue: defaultValue, - quoted: true, - expectedLog: `"foo"`, - }, - { - desc: "Should return defaultValue if fieldName does not exist", - fields: logrus.Fields{ - "Powpow": "foo", - }, - fieldName: "", - defaultValue: defaultValue, - quoted: false, - expectedLog: "-", - }, - { - desc: "Should return defaultValue if fields is nil", - fields: nil, - fieldName: "", - defaultValue: defaultValue, - quoted: false, - expectedLog: "-", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - lg := toLog(test.fields, test.fieldName, defaultValue, test.quoted) - - assert.Equal(t, test.expectedLog, lg) - }) - } -} diff --git a/old/middlewares/accesslog/logger_test.go b/old/middlewares/accesslog/logger_test.go deleted file mode 100644 index 35cbf9280..000000000 --- a/old/middlewares/accesslog/logger_test.go +++ /dev/null @@ -1,644 +0,0 @@ -package accesslog - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - logFileNameSuffix = "/traefik/logger/test.log" - testContent = "Hello, World" - testBackendName = "http://127.0.0.1/testBackend" - testFrontendName = "testFrontend" - testStatus = 123 - testContentSize int64 = 12 - testHostname = "TestHost" - testUsername = "TestUser" - testPath = "testpath" - testPort = 8181 - testProto = "HTTP/0.0" - testMethod = http.MethodPost - testReferer = "testReferer" - testUserAgent = "testUserAgent" - testRetryAttempts = 2 - testStart = time.Now() -) - -func TestLogRotation(t *testing.T) { - tempDir, err := ioutil.TempDir("", "traefik_") - if err != nil { - t.Fatalf("Error setting up temporary directory: %s", err) - } - - fileName := tempDir + "traefik.log" - rotatedFileName := fileName + ".rotated" - - config := &types.AccessLog{FilePath: fileName, Format: CommonFormat} - logHandler, err := NewLogHandler(config) - if err != nil { - t.Fatalf("Error creating new log handler: %s", err) - } - defer logHandler.Close() - - recorder := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) - next := func(rw http.ResponseWriter, req *http.Request) { - rw.WriteHeader(http.StatusOK) - } - - iterations := 20 - halfDone := make(chan bool) - writeDone := make(chan bool) - go func() { - for i := 0; i < iterations; i++ { - logHandler.ServeHTTP(recorder, req, next) - if i == iterations/2 { - halfDone <- true - } - } - writeDone <- true - }() - - <-halfDone - err = os.Rename(fileName, rotatedFileName) - if err != nil { - t.Fatalf("Error renaming file: %s", err) - } - - err = logHandler.Rotate() - if err != nil { - t.Fatalf("Error rotating file: %s", err) - } - - select { - case <-writeDone: - gotLineCount := lineCount(t, fileName) + lineCount(t, rotatedFileName) - if iterations != gotLineCount { - t.Errorf("Wanted %d written log lines, got %d", iterations, gotLineCount) - } - case <-time.After(500 * time.Millisecond): - t.Fatalf("test timed out") - } - - close(halfDone) - close(writeDone) -} - -func lineCount(t *testing.T, fileName string) int { - t.Helper() - fileContents, err := ioutil.ReadFile(fileName) - if err != nil { - t.Fatalf("Error reading from file %s: %s", fileName, err) - } - 
- count := 0 - for _, line := range strings.Split(string(fileContents), "\n") { - if strings.TrimSpace(line) == "" { - continue - } - count++ - } - - return count -} - -func TestLoggerCLF(t *testing.T) { - tmpDir := createTempDir(t, CommonFormat) - defer os.RemoveAll(tmpDir) - - logFilePath := filepath.Join(tmpDir, logFileNameSuffix) - config := &types.AccessLog{FilePath: logFilePath, Format: CommonFormat} - doLogging(t, config) - - logData, err := ioutil.ReadFile(logFilePath) - require.NoError(t, err) - - expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` - assertValidLogData(t, expectedLog, logData) -} - -func TestAsyncLoggerCLF(t *testing.T) { - tmpDir := createTempDir(t, CommonFormat) - defer os.RemoveAll(tmpDir) - - logFilePath := filepath.Join(tmpDir, logFileNameSuffix) - config := &types.AccessLog{FilePath: logFilePath, Format: CommonFormat, BufferingSize: 1024} - doLogging(t, config) - - logData, err := ioutil.ReadFile(logFilePath) - require.NoError(t, err) - - expectedLog := ` TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms` - assertValidLogData(t, expectedLog, logData) -} - -func assertString(exp string) func(t *testing.T, actual interface{}) { - return func(t *testing.T, actual interface{}) { - t.Helper() - - assert.Equal(t, exp, actual) - } -} - -func assertNotEqual(exp string) func(t *testing.T, actual interface{}) { - return func(t *testing.T, actual interface{}) { - t.Helper() - - assert.NotEqual(t, exp, actual) - } -} - -func assertFloat64(exp float64) func(t *testing.T, actual interface{}) { - return func(t *testing.T, actual interface{}) { - t.Helper() - - assert.Equal(t, exp, actual) - } -} - -func assertFloat64NotZero() func(t *testing.T, actual interface{}) { - return func(t *testing.T, actual interface{}) { - t.Helper() - - assert.NotZero(t, actual) - } -} - -func TestLoggerJSON(t *testing.T) { - testCases := []struct { - desc string - config *types.AccessLog - expected map[string]func(t *testing.T, value interface{}) - }{ - { - desc: "default config", - config: &types.AccessLog{ - FilePath: "", - Format: JSONFormat, - }, - expected: map[string]func(t *testing.T, value interface{}){ - RequestHost: assertString(testHostname), - RequestAddr: assertString(testHostname), - RequestMethod: assertString(testMethod), - RequestPath: assertString(testPath), - RequestProtocol: assertString(testProto), - RequestPort: assertString("-"), - DownstreamStatus: assertFloat64(float64(testStatus)), - DownstreamContentSize: assertFloat64(float64(len(testContent))), - OriginContentSize: assertFloat64(float64(len(testContent))), - OriginStatus: assertFloat64(float64(testStatus)), - RequestRefererHeader: assertString(testReferer), - RequestUserAgentHeader: assertString(testUserAgent), - FrontendName: assertString(testFrontendName), - BackendURL: assertString(testBackendName), - ClientUsername: assertString(testUsername), - ClientHost: assertString(testHostname), - ClientPort: assertString(fmt.Sprintf("%d", testPort)), - ClientAddr: assertString(fmt.Sprintf("%s:%d", testHostname, testPort)), - "level": assertString("info"), - "msg": assertString(""), - "downstream_Content-Type": assertString("text/plain; charset=utf-8"), - RequestCount: assertFloat64NotZero(), - Duration: assertFloat64NotZero(), - Overhead: assertFloat64NotZero(), - RetryAttempts: 
assertFloat64(float64(testRetryAttempts)), - "time": assertNotEqual(""), - "StartLocal": assertNotEqual(""), - "StartUTC": assertNotEqual(""), - }, - }, - { - desc: "default config drop all fields", - config: &types.AccessLog{ - FilePath: "", - Format: JSONFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - }, - }, - expected: map[string]func(t *testing.T, value interface{}){ - "level": assertString("info"), - "msg": assertString(""), - "time": assertNotEqual(""), - "downstream_Content-Type": assertString("text/plain; charset=utf-8"), - RequestRefererHeader: assertString(testReferer), - RequestUserAgentHeader: assertString(testUserAgent), - }, - }, - { - desc: "default config drop all fields and headers", - config: &types.AccessLog{ - FilePath: "", - Format: JSONFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Headers: &types.FieldHeaders{ - DefaultMode: "drop", - }, - }, - }, - expected: map[string]func(t *testing.T, value interface{}){ - "level": assertString("info"), - "msg": assertString(""), - "time": assertNotEqual(""), - }, - }, - { - desc: "default config drop all fields and redact headers", - config: &types.AccessLog{ - FilePath: "", - Format: JSONFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Headers: &types.FieldHeaders{ - DefaultMode: "redact", - }, - }, - }, - expected: map[string]func(t *testing.T, value interface{}){ - "level": assertString("info"), - "msg": assertString(""), - "time": assertNotEqual(""), - "downstream_Content-Type": assertString("REDACTED"), - RequestRefererHeader: assertString("REDACTED"), - RequestUserAgentHeader: assertString("REDACTED"), - }, - }, - { - desc: "default config drop all fields and headers but kept someone", - config: &types.AccessLog{ - FilePath: "", - Format: JSONFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Names: types.FieldNames{ - RequestHost: "keep", - }, - Headers: &types.FieldHeaders{ - DefaultMode: "drop", - Names: types.FieldHeaderNames{ - "Referer": "keep", - }, - }, - }, - }, - expected: map[string]func(t *testing.T, value interface{}){ - RequestHost: assertString(testHostname), - "level": assertString("info"), - "msg": assertString(""), - "time": assertNotEqual(""), - RequestRefererHeader: assertString(testReferer), - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - tmpDir := createTempDir(t, JSONFormat) - defer os.RemoveAll(tmpDir) - - logFilePath := filepath.Join(tmpDir, logFileNameSuffix) - - test.config.FilePath = logFilePath - doLogging(t, test.config) - - logData, err := ioutil.ReadFile(logFilePath) - require.NoError(t, err) - - jsonData := make(map[string]interface{}) - err = json.Unmarshal(logData, &jsonData) - require.NoError(t, err) - - assert.Equal(t, len(test.expected), len(jsonData)) - - for field, assertion := range test.expected { - assertion(t, jsonData[field]) - } - }) - } -} - -func TestNewLogHandlerOutputStdout(t *testing.T) { - testCases := []struct { - desc string - config *types.AccessLog - expectedLog string - }{ - { - desc: "default config", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "default config with empty filters", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{}, - }, - 
expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Status code filter not matching", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{ - StatusCodes: []string{"200"}, - }, - }, - expectedLog: ``, - }, - { - desc: "Status code filter matching", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{ - StatusCodes: []string{"123"}, - }, - }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Duration filter not matching", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{ - MinDuration: parse.Duration(1 * time.Hour), - }, - }, - expectedLog: ``, - }, - { - desc: "Duration filter matching", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{ - MinDuration: parse.Duration(1 * time.Millisecond), - }, - }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Retry attempts filter matching", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Filters: &types.AccessLogFilters{ - RetryAttempts: true, - }, - }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Default mode keep", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "keep", - }, - }, - expectedLog: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Default mode keep with override", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "keep", - Names: types.FieldNames{ - ClientHost: "drop", - }, - }, - }, - expectedLog: `- - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 23 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - }, - { - desc: "Default mode drop", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - }, - }, - expectedLog: `- - - [-] "- - -" - - "testReferer" "testUserAgent" - - - 0ms`, - }, - { - desc: "Default mode drop with override", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Names: types.FieldNames{ - ClientHost: "drop", - ClientUsername: "keep", - }, - }, - }, - expectedLog: `- - TestUser [-] "- - -" - - "testReferer" "testUserAgent" - - - 0ms`, - }, - { - desc: "Default mode drop with header dropped", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Names: types.FieldNames{ - ClientHost: "drop", - ClientUsername: "keep", - }, - Headers: &types.FieldHeaders{ - DefaultMode: "drop", - }, - }, - }, - expectedLog: `- - TestUser [-] "- - -" - - "-" "-" - - - 0ms`, - }, - { - 
desc: "Default mode drop with header redacted", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Names: types.FieldNames{ - ClientHost: "drop", - ClientUsername: "keep", - }, - Headers: &types.FieldHeaders{ - DefaultMode: "redact", - }, - }, - }, - expectedLog: `- - TestUser [-] "- - -" - - "REDACTED" "REDACTED" - - - 0ms`, - }, - { - desc: "Default mode drop with header redacted", - config: &types.AccessLog{ - FilePath: "", - Format: CommonFormat, - Fields: &types.AccessLogFields{ - DefaultMode: "drop", - Names: types.FieldNames{ - ClientHost: "drop", - ClientUsername: "keep", - }, - Headers: &types.FieldHeaders{ - DefaultMode: "keep", - Names: types.FieldHeaderNames{ - "Referer": "redact", - }, - }, - }, - }, - expectedLog: `- - TestUser [-] "- - -" - - "REDACTED" "testUserAgent" - - - 0ms`, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - - // NOTE: It is not possible to run these cases in parallel because we capture Stdout - - file, restoreStdout := captureStdout(t) - defer restoreStdout() - - doLogging(t, test.config) - - written, err := ioutil.ReadFile(file.Name()) - require.NoError(t, err, "unable to read captured stdout from file") - assertValidLogData(t, test.expectedLog, written) - }) - } -} - -func assertValidLogData(t *testing.T, expected string, logData []byte) { - - if len(expected) == 0 { - assert.Zero(t, len(logData)) - t.Log(string(logData)) - return - } - - result, err := ParseAccessLog(string(logData)) - require.NoError(t, err) - - resultExpected, err := ParseAccessLog(expected) - require.NoError(t, err) - - formatErrMessage := fmt.Sprintf(` - Expected: %s - Actual: %s`, expected, string(logData)) - - require.Equal(t, len(resultExpected), len(result), formatErrMessage) - assert.Equal(t, resultExpected[ClientHost], result[ClientHost], formatErrMessage) - assert.Equal(t, resultExpected[ClientUsername], result[ClientUsername], formatErrMessage) - assert.Equal(t, resultExpected[RequestMethod], result[RequestMethod], formatErrMessage) - assert.Equal(t, resultExpected[RequestPath], result[RequestPath], formatErrMessage) - assert.Equal(t, resultExpected[RequestProtocol], result[RequestProtocol], formatErrMessage) - assert.Equal(t, resultExpected[OriginStatus], result[OriginStatus], formatErrMessage) - assert.Equal(t, resultExpected[OriginContentSize], result[OriginContentSize], formatErrMessage) - assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage) - assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage) - assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage) - assert.Equal(t, resultExpected[FrontendName], result[FrontendName], formatErrMessage) - assert.Equal(t, resultExpected[BackendURL], result[BackendURL], formatErrMessage) - assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage) -} - -func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) { - file, err := ioutil.TempFile("", "testlogger") - require.NoError(t, err, "failed to create temp file") - - original := os.Stdout - os.Stdout = file - - restoreStdout = func() { - os.Stdout = original - } - - return file, restoreStdout -} - -func createTempDir(t *testing.T, prefix string) string { - tmpDir, err := ioutil.TempDir("", prefix) - require.NoError(t, err, "failed to create temp dir") - - return tmpDir -} - -func 
doLogging(t *testing.T, config *types.AccessLog) { - logger, err := NewLogHandler(config) - require.NoError(t, err) - defer logger.Close() - - if config.FilePath != "" { - _, err = os.Stat(config.FilePath) - require.NoError(t, err, fmt.Sprintf("logger should create %s", config.FilePath)) - } - - req := &http.Request{ - Header: map[string][]string{ - "User-Agent": {testUserAgent}, - "Referer": {testReferer}, - }, - Proto: testProto, - Host: testHostname, - Method: testMethod, - RemoteAddr: fmt.Sprintf("%s:%d", testHostname, testPort), - URL: &url.URL{ - Path: testPath, - }, - } - - logger.ServeHTTP(httptest.NewRecorder(), req, logWriterTestHandlerFunc) -} - -func logWriterTestHandlerFunc(rw http.ResponseWriter, r *http.Request) { - if _, err := rw.Write([]byte(testContent)); err != nil { - log.Error(err) - } - - rw.WriteHeader(testStatus) - - logDataTable := GetLogDataTable(r) - logDataTable.Core[FrontendName] = testFrontendName - logDataTable.Core[BackendURL] = testBackendName - logDataTable.Core[OriginStatus] = testStatus - logDataTable.Core[OriginContentSize] = testContentSize - logDataTable.Core[RetryAttempts] = testRetryAttempts - logDataTable.Core[StartUTC] = testStart.UTC() - logDataTable.Core[StartLocal] = testStart.Local() - logDataTable.Core[ClientUsername] = testUsername -} diff --git a/old/middlewares/accesslog/parser.go b/old/middlewares/accesslog/parser.go deleted file mode 100644 index c2931d153..000000000 --- a/old/middlewares/accesslog/parser.go +++ /dev/null @@ -1,54 +0,0 @@ -package accesslog - -import ( - "bytes" - "regexp" -) - -// ParseAccessLog parse line of access log and return a map with each fields -func ParseAccessLog(data string) (map[string]string, error) { - var buffer bytes.Buffer - buffer.WriteString(`(\S+)`) // 1 - ClientHost - buffer.WriteString(`\s-\s`) // - - Spaces - buffer.WriteString(`(\S+)\s`) // 2 - ClientUsername - buffer.WriteString(`\[([^]]+)\]\s`) // 3 - StartUTC - buffer.WriteString(`"(\S*)\s?`) // 4 - RequestMethod - buffer.WriteString(`((?:[^"]*(?:\\")?)*)\s`) // 5 - RequestPath - buffer.WriteString(`([^"]*)"\s`) // 6 - RequestProtocol - buffer.WriteString(`(\S+)\s`) // 7 - OriginStatus - buffer.WriteString(`(\S+)\s`) // 8 - OriginContentSize - buffer.WriteString(`("?\S+"?)\s`) // 9 - Referrer - buffer.WriteString(`("\S+")\s`) // 10 - User-Agent - buffer.WriteString(`(\S+)\s`) // 11 - RequestCount - buffer.WriteString(`("[^"]*"|-)\s`) // 12 - FrontendName - buffer.WriteString(`("[^"]*"|-)\s`) // 13 - BackendURL - buffer.WriteString(`(\S+)`) // 14 - Duration - - regex, err := regexp.Compile(buffer.String()) - if err != nil { - return nil, err - } - - submatch := regex.FindStringSubmatch(data) - result := make(map[string]string) - - // Need to be > 13 to match CLF format - if len(submatch) > 13 { - result[ClientHost] = submatch[1] - result[ClientUsername] = submatch[2] - result[StartUTC] = submatch[3] - result[RequestMethod] = submatch[4] - result[RequestPath] = submatch[5] - result[RequestProtocol] = submatch[6] - result[OriginStatus] = submatch[7] - result[OriginContentSize] = submatch[8] - result[RequestRefererHeader] = submatch[9] - result[RequestUserAgentHeader] = submatch[10] - result[RequestCount] = submatch[11] - result[FrontendName] = submatch[12] - result[BackendURL] = submatch[13] - result[Duration] = submatch[14] - } - - return result, nil -} diff --git a/old/middlewares/accesslog/parser_test.go b/old/middlewares/accesslog/parser_test.go deleted file mode 100644 index 701fed4c3..000000000 --- 
a/old/middlewares/accesslog/parser_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package accesslog - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseAccessLog(t *testing.T) { - testCases := []struct { - desc string - value string - expected map[string]string - }{ - { - desc: "full log", - value: `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12 "testReferer" "testUserAgent" 1 "testFrontend" "http://127.0.0.1/testBackend" 1ms`, - expected: map[string]string{ - ClientHost: "TestHost", - ClientUsername: "TestUser", - StartUTC: "13/Apr/2016:07:14:19 -0700", - RequestMethod: "POST", - RequestPath: "testpath", - RequestProtocol: "HTTP/0.0", - OriginStatus: "123", - OriginContentSize: "12", - RequestRefererHeader: `"testReferer"`, - RequestUserAgentHeader: `"testUserAgent"`, - RequestCount: "1", - FrontendName: `"testFrontend"`, - BackendURL: `"http://127.0.0.1/testBackend"`, - Duration: "1ms", - }, - }, - { - desc: "log with space", - value: `127.0.0.1 - - [09/Mar/2018:10:51:32 +0000] "GET / HTTP/1.1" 401 17 "-" "Go-http-client/1.1" 1 "testFrontend with space" - 0ms`, - expected: map[string]string{ - ClientHost: "127.0.0.1", - ClientUsername: "-", - StartUTC: "09/Mar/2018:10:51:32 +0000", - RequestMethod: "GET", - RequestPath: "/", - RequestProtocol: "HTTP/1.1", - OriginStatus: "401", - OriginContentSize: "17", - RequestRefererHeader: `"-"`, - RequestUserAgentHeader: `"Go-http-client/1.1"`, - RequestCount: "1", - FrontendName: `"testFrontend with space"`, - BackendURL: `-`, - Duration: "0ms", - }, - }, - { - desc: "bad log", - value: `bad`, - expected: map[string]string{}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - result, err := ParseAccessLog(test.value) - assert.NoError(t, err) - assert.Equal(t, len(test.expected), len(result)) - for key, value := range test.expected { - assert.Equal(t, value, result[key]) - } - }) - } -} diff --git a/old/middlewares/accesslog/save_backend.go b/old/middlewares/accesslog/save_backend.go deleted file mode 100644 index cd62c65a0..000000000 --- a/old/middlewares/accesslog/save_backend.go +++ /dev/null @@ -1,64 +0,0 @@ -package accesslog - -import ( - "net/http" - "time" - - "github.com/urfave/negroni" - "github.com/vulcand/oxy/utils" -) - -// SaveBackend sends the backend name to the logger. -// These are always used with a corresponding SaveFrontend handler. -type SaveBackend struct { - next http.Handler - backendName string -} - -// NewSaveBackend creates a SaveBackend handler. -func NewSaveBackend(next http.Handler, backendName string) http.Handler { - return &SaveBackend{next, backendName} -} - -func (sb *SaveBackend) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - serveSaveBackend(rw, r, sb.backendName, func(crw *captureResponseWriter) { - sb.next.ServeHTTP(crw, r) - }) -} - -// SaveNegroniBackend sends the backend name to the logger. -type SaveNegroniBackend struct { - next negroni.Handler - backendName string -} - -// NewSaveNegroniBackend creates a SaveBackend handler. 
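For reference, the parser exercised by the tests above comes down to a capture-group regex over the common log format. A trimmed standalone sketch that captures only a handful of the fourteen fields the removed ParseAccessLog extracts, assuming just the standard library:

package main

import (
	"fmt"
	"regexp"
)

// A cut-down version of the parser above: it pulls a few CLF fields out of an
// access-log line. The full regex in ParseAccessLog captures 14 fields.
var clfRe = regexp.MustCompile(`^(\S+) - (\S+) \[([^]]+)\] "(\S+) (\S+) ([^"]*)" (\S+) (\S+)`)

func main() {
	line := `TestHost - TestUser [13/Apr/2016:07:14:19 -0700] "POST testpath HTTP/0.0" 123 12`
	m := clfRe.FindStringSubmatch(line)
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("host:", m[1], "user:", m[2], "method:", m[4], "status:", m[7])
}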
-func NewSaveNegroniBackend(next negroni.Handler, backendName string) negroni.Handler { - return &SaveNegroniBackend{next, backendName} -} - -func (sb *SaveNegroniBackend) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - serveSaveBackend(rw, r, sb.backendName, func(crw *captureResponseWriter) { - sb.next.ServeHTTP(crw, r, next) - }) -} - -func serveSaveBackend(rw http.ResponseWriter, r *http.Request, backendName string, apply func(*captureResponseWriter)) { - table := GetLogDataTable(r) - table.Core[BackendName] = backendName - table.Core[BackendURL] = r.URL // note that this is *not* the original incoming URL - table.Core[BackendAddr] = r.URL.Host - - crw := &captureResponseWriter{rw: rw} - start := time.Now().UTC() - - apply(crw) - - // use UTC to handle switchover of daylight saving correctly - table.Core[OriginDuration] = time.Now().UTC().Sub(start) - table.Core[OriginStatus] = crw.Status() - // make copy of headers so we can ensure there is no subsequent mutation during response processing - table.OriginResponse = make(http.Header) - utils.CopyHeaders(table.OriginResponse, crw.Header()) - table.Core[OriginContentSize] = crw.Size() -} diff --git a/old/middlewares/accesslog/save_frontend.go b/old/middlewares/accesslog/save_frontend.go deleted file mode 100644 index e256444a5..000000000 --- a/old/middlewares/accesslog/save_frontend.go +++ /dev/null @@ -1,51 +0,0 @@ -package accesslog - -import ( - "net/http" - "strings" - - "github.com/urfave/negroni" -) - -// SaveFrontend sends the frontend name to the logger. -// These are sometimes used with a corresponding SaveBackend handler, but not always. -// For example, redirected requests don't reach a backend. -type SaveFrontend struct { - next http.Handler - frontendName string -} - -// NewSaveFrontend creates a SaveFrontend handler. -func NewSaveFrontend(next http.Handler, frontendName string) http.Handler { - return &SaveFrontend{next, frontendName} -} - -func (sf *SaveFrontend) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - serveSaveFrontend(r, sf.frontendName, func() { - sf.next.ServeHTTP(rw, r) - }) -} - -// SaveNegroniFrontend sends the frontend name to the logger. -type SaveNegroniFrontend struct { - next negroni.Handler - frontendName string -} - -// NewSaveNegroniFrontend creates a SaveNegroniFrontend handler. -func NewSaveNegroniFrontend(next negroni.Handler, frontendName string) negroni.Handler { - return &SaveNegroniFrontend{next, frontendName} -} - -func (sf *SaveNegroniFrontend) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - serveSaveFrontend(r, sf.frontendName, func() { - sf.next.ServeHTTP(rw, r, next) - }) -} - -func serveSaveFrontend(r *http.Request, frontendName string, apply func()) { - table := GetLogDataTable(r) - table.Core[FrontendName] = strings.TrimPrefix(frontendName, "frontend-") - - apply() -} diff --git a/old/middlewares/accesslog/save_retries.go b/old/middlewares/accesslog/save_retries.go deleted file mode 100644 index 56b19a14b..000000000 --- a/old/middlewares/accesslog/save_retries.go +++ /dev/null @@ -1,19 +0,0 @@ -package accesslog - -import ( - "net/http" -) - -// SaveRetries is an implementation of RetryListener that stores RetryAttempts in the LogDataTable. -type SaveRetries struct{} - -// Retried implements the RetryListener interface and will be called for each retry that happens. 
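The serveSaveBackend helper shown above follows a common wrapping pattern: decorate the ResponseWriter to capture the status, and time the origin call in UTC so daylight-saving changes cannot skew the duration. A minimal sketch of that pattern with standard-library types only; statusRecorder is an illustrative stand-in for captureResponseWriter:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

// statusRecorder remembers the status code written by the wrapped handler.
type statusRecorder struct {
	http.ResponseWriter
	status int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

// saveBackend mirrors the pattern above: time the origin call in UTC and
// record its status before handing the data to the logger.
func saveBackend(next http.Handler) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rec := &statusRecorder{ResponseWriter: rw, status: http.StatusOK}
		start := time.Now().UTC()
		next.ServeHTTP(rec, r)
		fmt.Printf("OriginDuration=%s OriginStatus=%d\n", time.Now().UTC().Sub(start), rec.status)
	})
}

func main() {
	h := saveBackend(http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
		rw.WriteHeader(http.StatusTeapot)
	}))
	h.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "/", nil))
}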
-func (s *SaveRetries) Retried(req *http.Request, attempt int) { - // it is the request attempt x, but the retry attempt is x-1 - if attempt > 0 { - attempt-- - } - - table := GetLogDataTable(req) - table.Core[RetryAttempts] = attempt -} diff --git a/old/middlewares/accesslog/save_retries_test.go b/old/middlewares/accesslog/save_retries_test.go deleted file mode 100644 index add4cc28f..000000000 --- a/old/middlewares/accesslog/save_retries_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package accesslog - -import ( - "context" - "fmt" - "net/http" - "net/http/httptest" - "testing" -) - -func TestSaveRetries(t *testing.T) { - tests := []struct { - requestAttempt int - wantRetryAttemptsInLog int - }{ - { - requestAttempt: 0, - wantRetryAttemptsInLog: 0, - }, - { - requestAttempt: 1, - wantRetryAttemptsInLog: 0, - }, - { - requestAttempt: 3, - wantRetryAttemptsInLog: 2, - }, - } - - for _, test := range tests { - test := test - - t.Run(fmt.Sprintf("%d retries", test.requestAttempt), func(t *testing.T) { - t.Parallel() - saveRetries := &SaveRetries{} - - logDataTable := &LogData{Core: make(CoreLogData)} - req := httptest.NewRequest(http.MethodGet, "/some/path", nil) - reqWithDataTable := req.WithContext(context.WithValue(req.Context(), DataTableKey, logDataTable)) - - saveRetries.Retried(reqWithDataTable, test.requestAttempt) - - if logDataTable.Core[RetryAttempts] != test.wantRetryAttemptsInLog { - t.Errorf("got %v in logDataTable, want %v", logDataTable.Core[RetryAttempts], test.wantRetryAttemptsInLog) - } - }) - } -} diff --git a/old/middlewares/accesslog/save_username.go b/old/middlewares/accesslog/save_username.go deleted file mode 100644 index 6debf7795..000000000 --- a/old/middlewares/accesslog/save_username.go +++ /dev/null @@ -1,60 +0,0 @@ -package accesslog - -import ( - "context" - "net/http" - - "github.com/urfave/negroni" -) - -const ( - clientUsernameKey key = "ClientUsername" -) - -// SaveUsername sends the Username name to the access logger. -type SaveUsername struct { - next http.Handler -} - -// NewSaveUsername creates a SaveUsername handler. -func NewSaveUsername(next http.Handler) http.Handler { - return &SaveUsername{next} -} - -func (sf *SaveUsername) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - serveSaveUsername(r, func() { - sf.next.ServeHTTP(rw, r) - }) -} - -// SaveNegroniUsername adds the Username to the access logger data table. -type SaveNegroniUsername struct { - next negroni.Handler -} - -// NewSaveNegroniUsername creates a SaveNegroniUsername handler. 
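SaveUsername relies on the usual typed-context-key idiom, so the authenticator can hand the username to the access logger without abusing headers. A standalone sketch of the idiom with hypothetical names:

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// ctxKey mirrors the unexported `key` type above: a distinct type for context
// keys avoids collisions with other packages' string keys.
type ctxKey string

const usernameKey ctxKey = "ClientUsername"

func withUserName(req *http.Request, username string) *http.Request {
	return req.WithContext(context.WithValue(req.Context(), usernameKey, username))
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req = withUserName(req, "alice")

	// Downstream (e.g. the access logger) reads the value back with a type assertion.
	if username, ok := req.Context().Value(usernameKey).(string); ok {
		fmt.Println("ClientUsername:", username)
	}
}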
-func NewSaveNegroniUsername(next negroni.Handler) negroni.Handler { - return &SaveNegroniUsername{next} -} - -func (sf *SaveNegroniUsername) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - serveSaveUsername(r, func() { - sf.next.ServeHTTP(rw, r, next) - }) -} - -func serveSaveUsername(r *http.Request, apply func()) { - table := GetLogDataTable(r) - - username, ok := r.Context().Value(clientUsernameKey).(string) - if ok { - table.Core[ClientUsername] = username - } - - apply() -} - -// WithUserName adds a username to a requests' context -func WithUserName(req *http.Request, username string) *http.Request { - return req.WithContext(context.WithValue(req.Context(), clientUsernameKey, username)) -} diff --git a/old/middlewares/addPrefix.go b/old/middlewares/addPrefix.go deleted file mode 100644 index 19f142fe3..000000000 --- a/old/middlewares/addPrefix.go +++ /dev/null @@ -1,35 +0,0 @@ -package middlewares - -import ( - "context" - "net/http" -) - -// AddPrefix is a middleware used to add prefix to an URL request -type AddPrefix struct { - Handler http.Handler - Prefix string -} - -type key string - -const ( - // AddPrefixKey is the key within the request context used to - // store the added prefix - AddPrefixKey key = "AddPrefix" -) - -func (s *AddPrefix) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.URL.Path = s.Prefix + r.URL.Path - if r.URL.RawPath != "" { - r.URL.RawPath = s.Prefix + r.URL.RawPath - } - r.RequestURI = r.URL.RequestURI() - r = r.WithContext(context.WithValue(r.Context(), AddPrefixKey, s.Prefix)) - s.Handler.ServeHTTP(w, r) -} - -// SetHandler sets handler -func (s *AddPrefix) SetHandler(Handler http.Handler) { - s.Handler = Handler -} diff --git a/old/middlewares/addPrefix_test.go b/old/middlewares/addPrefix_test.go deleted file mode 100644 index 720cc3944..000000000 --- a/old/middlewares/addPrefix_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package middlewares - -import ( - "net/http" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" -) - -func TestAddPrefix(t *testing.T) { - logrus.SetLevel(logrus.DebugLevel) - tests := []struct { - desc string - prefix string - path string - expectedPath string - expectedRawPath string - }{ - { - desc: "regular path", - prefix: "/a", - path: "/b", - expectedPath: "/a/b", - }, - { - desc: "raw path is supported", - prefix: "/a", - path: "/b%2Fc", - expectedPath: "/a/b/c", - expectedRawPath: "/a/b%2Fc", - }, - } - - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var actualPath, actualRawPath, requestURI string - handler := &AddPrefix{ - Prefix: test.prefix, - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actualPath = r.URL.Path - actualRawPath = r.URL.RawPath - requestURI = r.RequestURI - }), - } - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) - - handler.ServeHTTP(nil, req) - - assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") - assert.Equal(t, test.expectedRawPath, actualRawPath, "Unexpected raw path.") - - expectedURI := test.expectedPath - if test.expectedRawPath != "" { - // go HTTP uses the raw path when existent in the RequestURI - expectedURI = test.expectedRawPath - } - assert.Equal(t, expectedURI, requestURI, "Unexpected request URI.") - }) - } -} diff --git a/old/middlewares/auth/authenticator.go b/old/middlewares/auth/authenticator.go deleted file mode 100644 index 
cdcd96266..000000000 --- a/old/middlewares/auth/authenticator.go +++ /dev/null @@ -1,167 +0,0 @@ -package auth - -import ( - "fmt" - "io/ioutil" - "net/http" - "strings" - - goauth "github.com/abbot/go-http-auth" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/accesslog" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/types" - "github.com/urfave/negroni" -) - -// Authenticator is a middleware that provides HTTP basic and digest authentication -type Authenticator struct { - handler negroni.Handler - users map[string]string -} - -type tracingAuthenticator struct { - name string - handler negroni.Handler - clientSpanKind bool -} - -const ( - authorizationHeader = "Authorization" -) - -// NewAuthenticator builds a new Authenticator given a config -func NewAuthenticator(authConfig *types.Auth, tracingMiddleware *tracing.Tracing) (*Authenticator, error) { - if authConfig == nil { - return nil, fmt.Errorf("error creating Authenticator: auth is nil") - } - - var err error - authenticator := &Authenticator{} - tracingAuth := tracingAuthenticator{} - - if authConfig.Basic != nil { - authenticator.users, err = parserBasicUsers(authConfig.Basic) - if err != nil { - return nil, err - } - realm := "traefik" - if authConfig.Basic.Realm != "" { - realm = authConfig.Basic.Realm - } - basicAuth := goauth.NewBasicAuthenticator(realm, authenticator.secretBasic) - tracingAuth.handler = createAuthBasicHandler(basicAuth, authConfig) - tracingAuth.name = "Auth Basic" - tracingAuth.clientSpanKind = false - } else if authConfig.Digest != nil { - authenticator.users, err = parserDigestUsers(authConfig.Digest) - if err != nil { - return nil, err - } - - digestAuth := goauth.NewDigestAuthenticator("traefik", authenticator.secretDigest) - tracingAuth.handler = createAuthDigestHandler(digestAuth, authConfig) - tracingAuth.name = "Auth Digest" - tracingAuth.clientSpanKind = false - } else if authConfig.Forward != nil { - tracingAuth.handler = createAuthForwardHandler(authConfig) - tracingAuth.name = "Auth Forward" - tracingAuth.clientSpanKind = true - } - - if tracingMiddleware != nil { - authenticator.handler = tracingMiddleware.NewNegroniHandlerWrapper(tracingAuth.name, tracingAuth.handler, tracingAuth.clientSpanKind) - } else { - authenticator.handler = tracingAuth.handler - } - return authenticator, nil -} - -func createAuthForwardHandler(authConfig *types.Auth) negroni.HandlerFunc { - return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - Forward(authConfig.Forward, w, r, next) - }) -} - -func createAuthDigestHandler(digestAuth *goauth.DigestAuth, authConfig *types.Auth) negroni.HandlerFunc { - return negroni.HandlerFunc(func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if username, _ := digestAuth.CheckAuth(r); username == "" { - log.Debugf("Digest auth failed") - digestAuth.RequireAuth(w, r) - } else { - log.Debugf("Digest auth succeeded") - - // set username in request context - r = accesslog.WithUserName(r, username) - - if authConfig.HeaderField != "" { - r.Header[authConfig.HeaderField] = []string{username} - } - if authConfig.Digest.RemoveHeader { - log.Debugf("Remove the Authorization header from the Digest auth") - r.Header.Del(authorizationHeader) - } - next.ServeHTTP(w, r) - } - }) -} - -func createAuthBasicHandler(basicAuth *goauth.BasicAuth, authConfig *types.Auth) negroni.HandlerFunc { - return negroni.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request, next http.HandlerFunc) { - if username := basicAuth.CheckAuth(r); username == "" { - log.Debugf("Basic auth failed") - basicAuth.RequireAuth(w, r) - } else { - log.Debugf("Basic auth succeeded") - - // set username in request context - r = accesslog.WithUserName(r, username) - - if authConfig.HeaderField != "" { - r.Header[authConfig.HeaderField] = []string{username} - } - if authConfig.Basic.RemoveHeader { - log.Debugf("Remove the Authorization header from the Basic auth") - r.Header.Del(authorizationHeader) - } - next.ServeHTTP(w, r) - } - }) -} - -func getLinesFromFile(filename string) ([]string, error) { - dat, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - // Trim lines and filter out blanks - rawLines := strings.Split(string(dat), "\n") - var filteredLines []string - for _, rawLine := range rawLines { - line := strings.TrimSpace(rawLine) - if line != "" { - filteredLines = append(filteredLines, line) - } - } - return filteredLines, nil -} - -func (a *Authenticator) secretBasic(user, realm string) string { - if secret, ok := a.users[user]; ok { - return secret - } - log.Debugf("User not found: %s", user) - return "" -} - -func (a *Authenticator) secretDigest(user, realm string) string { - if secret, ok := a.users[user+":"+realm]; ok { - return secret - } - log.Debugf("User not found: %s:%s", user, realm) - return "" -} - -func (a *Authenticator) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - a.handler.ServeHTTP(rw, r, next) -} diff --git a/old/middlewares/auth/authenticator_test.go b/old/middlewares/auth/authenticator_test.go deleted file mode 100644 index 86fbadd27..000000000 --- a/old/middlewares/auth/authenticator_test.go +++ /dev/null @@ -1,297 +0,0 @@ -package auth - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/negroni" -) - -func TestAuthUsersFromFile(t *testing.T) { - tests := []struct { - authType string - usersStr string - userKeys []string - parserFunc func(fileName string) (map[string]string, error) - }{ - { - authType: "basic", - usersStr: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/\ntest2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0\n", - userKeys: []string{"test", "test2"}, - parserFunc: func(fileName string) (map[string]string, error) { - basic := &types.Basic{ - UsersFile: fileName, - } - return parserBasicUsers(basic) - }, - }, - { - authType: "digest", - usersStr: "test:traefik:a2688e031edb4be6a3797f3882655c05 \ntest2:traefik:518845800f9e2bfb1f1f740ec24f074e\n", - userKeys: []string{"test:traefik", "test2:traefik"}, - parserFunc: func(fileName string) (map[string]string, error) { - digest := &types.Digest{ - UsersFile: fileName, - } - return parserDigestUsers(digest) - }, - }, - } - - for _, test := range tests { - test := test - t.Run(test.authType, func(t *testing.T) { - t.Parallel() - usersFile, err := ioutil.TempFile("", "auth-users") - require.NoError(t, err) - defer os.Remove(usersFile.Name()) - - _, err = usersFile.Write([]byte(test.usersStr)) - require.NoError(t, err) - - users, err := test.parserFunc(usersFile.Name()) - require.NoError(t, err) - assert.Equal(t, 2, len(users), "they should be equal") - - _, ok := users[test.userKeys[0]] - assert.True(t, ok, "user test should be found") - _, ok = 
users[test.userKeys[1]] - assert.True(t, ok, "user test2 should be found") - }) - } -} - -func TestBasicAuthFail(t *testing.T) { - _, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test"}, - }, - }, &tracing.Tracing{}) - assert.Contains(t, err.Error(), "error parsing Authenticator user", "should contains") - - authMiddleware, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:test"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - assert.Equal(t, http.StatusUnauthorized, res.StatusCode, "they should be equal") -} - -func TestBasicAuthSuccess(t *testing.T) { - authMiddleware, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, "traefik\n", string(body), "they should be equal") -} - -func TestBasicRealm(t *testing.T) { - authMiddlewareDefaultRealm, errdefault := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, errdefault) - - authMiddlewareCustomRealm, errcustom := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Realm: "foobar", - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, errcustom) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - - n := negroni.New(authMiddlewareDefaultRealm) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err := client.Do(req) - require.NoError(t, err) - assert.Equal(t, "Basic realm=\"traefik\"", res.Header.Get("Www-Authenticate"), "they should be equal") - - n = negroni.New(authMiddlewareCustomRealm) - n.UseHandler(handler) - ts = httptest.NewServer(n) - defer ts.Close() - - client = &http.Client{} - req = testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err = client.Do(req) - require.NoError(t, err) - assert.Equal(t, "Basic realm=\"foobar\"", res.Header.Get("Www-Authenticate"), "they should be equal") -} - -func TestDigestAuthFail(t *testing.T) { - _, err := NewAuthenticator(&types.Auth{ - Digest: &types.Digest{ - Users: []string{"test"}, - }, - }, &tracing.Tracing{}) - assert.Contains(t, err.Error(), "error parsing Authenticator user", "should contains") - - authMiddleware, err := NewAuthenticator(&types.Auth{ - Digest: 
&types.Digest{ - Users: []string{"test:traefik:test"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, err) - assert.NotNil(t, authMiddleware, "this should not be nil") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - assert.Equal(t, http.StatusUnauthorized, res.StatusCode, "they should be equal") -} - -func TestBasicAuthUserHeader(t *testing.T) { - middleware, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - HeaderField: "X-Webauth-User", - }, &tracing.Tracing{}) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.Equal(t, "test", r.Header["X-Webauth-User"][0], "auth user should be set") - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, "traefik\n", string(body), "they should be equal") -} - -func TestBasicAuthHeaderRemoved(t *testing.T) { - middleware, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.Empty(t, r.Header.Get(authorizationHeader)) - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, "traefik\n", string(body), "they should be equal") -} - -func TestBasicAuthHeaderPresent(t *testing.T) { - middleware, err := NewAuthenticator(&types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/"}, - }, - }, &tracing.Tracing{}) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.NotEmpty(t, r.Header.Get(authorizationHeader)) - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{} - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.SetBasicAuth("test", "test") - res, err := client.Do(req) - require.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - require.NoError(t, err) - assert.Equal(t, "traefik\n", string(body), "they should be equal") -} diff --git a/old/middlewares/auth/forward.go 
b/old/middlewares/auth/forward.go deleted file mode 100644 index 2c274d88f..000000000 --- a/old/middlewares/auth/forward.go +++ /dev/null @@ -1,157 +0,0 @@ -package auth - -import ( - "io/ioutil" - "net" - "net/http" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/types" - "github.com/vulcand/oxy/forward" - "github.com/vulcand/oxy/utils" -) - -const ( - xForwardedURI = "X-Forwarded-Uri" - xForwardedMethod = "X-Forwarded-Method" -) - -// Forward the authentication to a external server -func Forward(config *types.Forward, w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // Ensure our request client does not follow redirects - httpClient := http.Client{ - CheckRedirect: func(r *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - - if config.TLS != nil { - tlsConfig, err := config.TLS.CreateTLSConfig() - if err != nil { - tracing.SetErrorAndDebugLog(r, "Unable to configure TLS to call %s. Cause %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - httpClient.Transport = &http.Transport{ - TLSClientConfig: tlsConfig, - } - } - - forwardReq, err := http.NewRequest(http.MethodGet, config.Address, http.NoBody) - tracing.LogRequest(tracing.GetSpan(r), forwardReq) - if err != nil { - tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - - writeHeader(r, forwardReq, config.TrustForwardHeader) - - tracing.InjectRequestHeaders(forwardReq) - - forwardResponse, forwardErr := httpClient.Do(forwardReq) - if forwardErr != nil { - tracing.SetErrorAndDebugLog(r, "Error calling %s. Cause: %s", config.Address, forwardErr) - w.WriteHeader(http.StatusInternalServerError) - return - } - - body, readError := ioutil.ReadAll(forwardResponse.Body) - if readError != nil { - tracing.SetErrorAndDebugLog(r, "Error reading body %s. Cause: %s", config.Address, readError) - w.WriteHeader(http.StatusInternalServerError) - return - } - defer forwardResponse.Body.Close() - - // Pass the forward response's body and selected headers if it - // didn't return a response within the range of [200, 300). - if forwardResponse.StatusCode < http.StatusOK || forwardResponse.StatusCode >= http.StatusMultipleChoices { - log.Debugf("Remote error %s. StatusCode: %d", config.Address, forwardResponse.StatusCode) - - utils.CopyHeaders(w.Header(), forwardResponse.Header) - utils.RemoveHeaders(w.Header(), forward.HopHeaders...) - - // Grab the location header, if any. - redirectURL, err := forwardResponse.Location() - - if err != nil { - if err != http.ErrNoLocation { - tracing.SetErrorAndDebugLog(r, "Error reading response location header %s. Cause: %s", config.Address, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } else if redirectURL.String() != "" { - // Set the location in our response if one was sent back. 
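// In this branch the auth server has refused the request (status outside
// [200, 300)), so its reply is relayed to the client: response headers are
// copied with hop-by-hop headers stripped, any Location header is passed
// through just below so authentication redirects keep working, and the
// recorded status code and body are then written back unchanged.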
- w.Header().Set("Location", redirectURL.String()) - } - - tracing.LogResponseCode(tracing.GetSpan(r), forwardResponse.StatusCode) - w.WriteHeader(forwardResponse.StatusCode) - - if _, err = w.Write(body); err != nil { - log.Error(err) - } - return - } - - for _, headerName := range config.AuthResponseHeaders { - r.Header.Set(headerName, forwardResponse.Header.Get(headerName)) - } - - r.RequestURI = r.URL.RequestURI() - next(w, r) -} - -func writeHeader(req *http.Request, forwardReq *http.Request, trustForwardHeader bool) { - utils.CopyHeaders(forwardReq.Header, req.Header) - utils.RemoveHeaders(forwardReq.Header, forward.HopHeaders...) - - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - if trustForwardHeader { - if prior, ok := req.Header[forward.XForwardedFor]; ok { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - } - forwardReq.Header.Set(forward.XForwardedFor, clientIP) - } - - if xMethod := req.Header.Get(xForwardedMethod); xMethod != "" && trustForwardHeader { - forwardReq.Header.Set(xForwardedMethod, xMethod) - } else if req.Method != "" { - forwardReq.Header.Set(xForwardedMethod, req.Method) - } else { - forwardReq.Header.Del(xForwardedMethod) - } - - if xfp := req.Header.Get(forward.XForwardedProto); xfp != "" && trustForwardHeader { - forwardReq.Header.Set(forward.XForwardedProto, xfp) - } else if req.TLS != nil { - forwardReq.Header.Set(forward.XForwardedProto, "https") - } else { - forwardReq.Header.Set(forward.XForwardedProto, "http") - } - - if xfp := req.Header.Get(forward.XForwardedPort); xfp != "" && trustForwardHeader { - forwardReq.Header.Set(forward.XForwardedPort, xfp) - } - - if xfh := req.Header.Get(forward.XForwardedHost); xfh != "" && trustForwardHeader { - forwardReq.Header.Set(forward.XForwardedHost, xfh) - } else if req.Host != "" { - forwardReq.Header.Set(forward.XForwardedHost, req.Host) - } else { - forwardReq.Header.Del(forward.XForwardedHost) - } - - if xfURI := req.Header.Get(xForwardedURI); xfURI != "" && trustForwardHeader { - forwardReq.Header.Set(xForwardedURI, xfURI) - } else if req.URL.RequestURI() != "" { - forwardReq.Header.Set(xForwardedURI, req.URL.RequestURI()) - } else { - forwardReq.Header.Del(xForwardedURI) - } -} diff --git a/old/middlewares/auth/forward_test.go b/old/middlewares/auth/forward_test.go deleted file mode 100644 index e3f2d0e47..000000000 --- a/old/middlewares/auth/forward_test.go +++ /dev/null @@ -1,392 +0,0 @@ -package auth - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/negroni" - "github.com/vulcand/oxy/forward" -) - -func TestForwardAuthFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Error(w, "Forbidden", http.StatusForbidden) - })) - defer server.Close() - - middleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: server.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err := 
http.DefaultClient.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "Forbidden\n", string(body), "they should be equal") -} - -func TestForwardAuthSuccess(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("X-Auth-User", "user@example.com") - w.Header().Set("X-Auth-Secret", "secret") - fmt.Fprintln(w, "Success") - })) - defer server.Close() - - middleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: server.URL, - AuthResponseHeaders: []string{"X-Auth-User"}, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - assert.Equal(t, "user@example.com", r.Header.Get("X-Auth-User")) - assert.Empty(t, r.Header.Get("X-Auth-Secret")) - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(middleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err := http.DefaultClient.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusOK, res.StatusCode, "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "traefik\n", string(body), "they should be equal") -} - -func TestForwardAuthRedirect(t *testing.T) { - authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound) - })) - defer authTs.Close() - - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{ - CheckRedirect: func(r *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err := client.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusFound, res.StatusCode, "they should be equal") - - location, err := res.Location() - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.NotEmpty(t, string(body), "there should be something in the body") -} - -func TestForwardAuthRemoveHopByHopHeaders(t *testing.T) { - authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - headers := w.Header() - for _, header := range forward.HopHeaders { - if header == forward.TransferEncoding { - headers.Add(header, "identity") - } else { - headers.Add(header, "test") - } - } - - http.Redirect(w, r, "http://example.com/redirect-test", http.StatusFound) - })) - defer authTs.Close() - - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, 
&tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - client := &http.Client{ - CheckRedirect: func(r *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - res, err := client.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusFound, res.StatusCode, "they should be equal") - - for _, header := range forward.HopHeaders { - assert.Equal(t, "", res.Header.Get(header), "hop-by-hop header '%s' mustn't be set", header) - } - - location, err := res.Location() - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "http://example.com/redirect-test", location.String(), "they should be equal") - - body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.NotEmpty(t, string(body), "there should be something in the body") -} - -func TestForwardAuthFailResponseHeaders(t *testing.T) { - authTs := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - cookie := &http.Cookie{Name: "example", Value: "testing", Path: "/"} - http.SetCookie(w, cookie) - w.Header().Add("X-Foo", "bar") - http.Error(w, "Forbidden", http.StatusForbidden) - })) - defer authTs.Close() - - authMiddleware, err := NewAuthenticator(&types.Auth{ - Forward: &types.Forward{ - Address: authTs.URL, - }, - }, &tracing.Tracing{}) - assert.NoError(t, err, "there should be no error") - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "traefik") - }) - n := negroni.New(authMiddleware) - n.UseHandler(handler) - ts := httptest.NewServer(n) - defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - client := &http.Client{} - res, err := client.Do(req) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, http.StatusForbidden, res.StatusCode, "they should be equal") - - require.Len(t, res.Cookies(), 1) - for _, cookie := range res.Cookies() { - assert.Equal(t, "testing", cookie.Value, "they should be equal") - } - - expectedHeaders := http.Header{ - "Content-Length": []string{"10"}, - "Content-Type": []string{"text/plain; charset=utf-8"}, - "X-Foo": []string{"bar"}, - "Set-Cookie": []string{"example=testing; Path=/"}, - "X-Content-Type-Options": []string{"nosniff"}, - } - - assert.Len(t, res.Header, 6) - for key, value := range expectedHeaders { - assert.Equal(t, value, res.Header[key]) - } - - body, err := ioutil.ReadAll(res.Body) - assert.NoError(t, err, "there should be no error") - assert.Equal(t, "Forbidden\n", string(body), "they should be equal") -} - -func Test_writeHeader(t *testing.T) { - testCases := []struct { - name string - headers map[string]string - trustForwardHeader bool - emptyHost bool - expectedHeaders map[string]string - checkForUnexpectedHeaders bool - }{ - { - name: "trust Forward Header", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - trustForwardHeader: true, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - }, - { - name: "not trust Forward Header", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - 
trustForwardHeader: false, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "foo.bar", - }, - }, - { - name: "trust Forward Header with empty Host", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - trustForwardHeader: true, - emptyHost: true, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - }, - { - name: "not trust Forward Header with empty Host", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - }, - trustForwardHeader: false, - emptyHost: true, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "", - }, - }, - { - name: "trust Forward Header with forwarded URI", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - "X-Forwarded-Uri": "/forward?q=1", - }, - trustForwardHeader: true, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - "X-Forwarded-Uri": "/forward?q=1", - }, - }, - { - name: "not trust Forward Header with forward requested URI", - headers: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "fii.bir", - "X-Forwarded-Uri": "/forward?q=1", - }, - trustForwardHeader: false, - expectedHeaders: map[string]string{ - "Accept": "application/json", - "X-Forwarded-Host": "foo.bar", - "X-Forwarded-Uri": "/path?q=1", - }, - }, { - name: "trust Forward Header with forwarded request Method", - headers: map[string]string{ - "X-Forwarded-Method": "OPTIONS", - }, - trustForwardHeader: true, - expectedHeaders: map[string]string{ - "X-Forwarded-Method": "OPTIONS", - }, - }, - { - name: "not trust Forward Header with forward request Method", - headers: map[string]string{ - "X-Forwarded-Method": "OPTIONS", - }, - trustForwardHeader: false, - expectedHeaders: map[string]string{ - "X-Forwarded-Method": "GET", - }, - }, - { - name: "remove hop-by-hop headers", - headers: map[string]string{ - forward.Connection: "Connection", - forward.KeepAlive: "KeepAlive", - forward.ProxyAuthenticate: "ProxyAuthenticate", - forward.ProxyAuthorization: "ProxyAuthorization", - forward.Te: "Te", - forward.Trailers: "Trailers", - forward.TransferEncoding: "TransferEncoding", - forward.Upgrade: "Upgrade", - "X-CustomHeader": "CustomHeader", - }, - trustForwardHeader: false, - expectedHeaders: map[string]string{ - "X-CustomHeader": "CustomHeader", - "X-Forwarded-Proto": "http", - "X-Forwarded-Host": "foo.bar", - "X-Forwarded-Uri": "/path?q=1", - "X-Forwarded-Method": "GET", - }, - checkForUnexpectedHeaders: true, - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - - req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/path?q=1", nil) - for key, value := range test.headers { - req.Header.Set(key, value) - } - - if test.emptyHost { - req.Host = "" - } - - forwardReq := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/path?q=1", nil) - - writeHeader(req, forwardReq, test.trustForwardHeader) - - actualHeaders := forwardReq.Header - expectedHeaders := test.expectedHeaders - for key, value := range expectedHeaders { - assert.Equal(t, value, actualHeaders.Get(key)) - actualHeaders.Del(key) - } - if test.checkForUnexpectedHeaders { - for key := range actualHeaders { - assert.Fail(t, "Unexpected header found", key) - } - } - }) - } -} diff --git a/old/middlewares/auth/parser.go b/old/middlewares/auth/parser.go 
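// A minimal standard-library sketch of the X-Forwarded-* propagation that
// Test_writeHeader above exercises: a trusted caller keeps the forwarded
// headers it sent, an untrusted caller gets values derived from the request
// itself. Names below are illustrative, not part of the Traefik code base.
package example

import (
	"net"
	"net/http"
)

func setForwardedHeaders(in, out *http.Request, trust bool) {
	if host := in.Header.Get("X-Forwarded-Host"); host != "" && trust {
		out.Header.Set("X-Forwarded-Host", host)
	} else if in.Host != "" {
		out.Header.Set("X-Forwarded-Host", in.Host)
	}

	proto := "http"
	if in.TLS != nil {
		proto = "https"
	}
	if xfp := in.Header.Get("X-Forwarded-Proto"); xfp != "" && trust {
		proto = xfp
	}
	out.Header.Set("X-Forwarded-Proto", proto)

	if clientIP, _, err := net.SplitHostPort(in.RemoteAddr); err == nil {
		out.Header.Set("X-Forwarded-For", clientIP)
	}
}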
deleted file mode 100644 index 885d56fed..000000000 --- a/old/middlewares/auth/parser.go +++ /dev/null @@ -1,48 +0,0 @@ -package auth - -import ( - "fmt" - "strings" - - "github.com/containous/traefik/old/types" -) - -func parserBasicUsers(basic *types.Basic) (map[string]string, error) { - var userStrs []string - if basic.UsersFile != "" { - var err error - if userStrs, err = getLinesFromFile(basic.UsersFile); err != nil { - return nil, err - } - } - userStrs = append(basic.Users, userStrs...) - userMap := make(map[string]string) - for _, user := range userStrs { - split := strings.Split(user, ":") - if len(split) != 2 { - return nil, fmt.Errorf("error parsing Authenticator user: %v", user) - } - userMap[split[0]] = split[1] - } - return userMap, nil -} - -func parserDigestUsers(digest *types.Digest) (map[string]string, error) { - var userStrs []string - if digest.UsersFile != "" { - var err error - if userStrs, err = getLinesFromFile(digest.UsersFile); err != nil { - return nil, err - } - } - userStrs = append(digest.Users, userStrs...) - userMap := make(map[string]string) - for _, user := range userStrs { - split := strings.Split(user, ":") - if len(split) != 3 { - return nil, fmt.Errorf("error parsing Authenticator user: %v", user) - } - userMap[split[0]+":"+split[1]] = split[2] - } - return userMap, nil -} diff --git a/old/middlewares/cbreaker.go b/old/middlewares/cbreaker.go deleted file mode 100644 index a8f534960..000000000 --- a/old/middlewares/cbreaker.go +++ /dev/null @@ -1,40 +0,0 @@ -package middlewares - -import ( - "net/http" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/vulcand/oxy/cbreaker" -) - -// CircuitBreaker holds the oxy circuit breaker. -type CircuitBreaker struct { - circuitBreaker *cbreaker.CircuitBreaker -} - -// NewCircuitBreaker returns a new CircuitBreaker. -func NewCircuitBreaker(next http.Handler, expression string, options ...cbreaker.CircuitBreakerOption) (*CircuitBreaker, error) { - circuitBreaker, err := cbreaker.New(next, expression, options...) 
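// cbreaker.New (vulcand/oxy) parses the expression and wraps `next`; while
// the circuit is tripped, requests go to the fallback handler installed via
// cbreaker.Fallback in NewCircuitBreakerOptions below instead of the backend.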
- if err != nil { - return nil, err - } - return &CircuitBreaker{circuitBreaker}, nil -} - -// NewCircuitBreakerOptions returns a new CircuitBreakerOption -func NewCircuitBreakerOptions(expression string) cbreaker.CircuitBreakerOption { - return cbreaker.Fallback(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - tracing.LogEventf(r, "blocked by circuit-breaker (%q)", expression) - - w.WriteHeader(http.StatusServiceUnavailable) - - if _, err := w.Write([]byte(http.StatusText(http.StatusServiceUnavailable))); err != nil { - log.Error(err) - } - })) -} - -func (cb *CircuitBreaker) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - cb.circuitBreaker.ServeHTTP(rw, r) -} diff --git a/old/middlewares/compress.go b/old/middlewares/compress.go deleted file mode 100644 index 9989e889c..000000000 --- a/old/middlewares/compress.go +++ /dev/null @@ -1,33 +0,0 @@ -package middlewares - -import ( - "compress/gzip" - "net/http" - "strings" - - "github.com/NYTimes/gziphandler" - "github.com/containous/traefik/old/log" -) - -// Compress is a middleware that allows to compress the response -type Compress struct{} - -// ServeHTTP is a function used by Negroni -func (c *Compress) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - contentType := r.Header.Get("Content-Type") - if strings.HasPrefix(contentType, "application/grpc") { - next.ServeHTTP(rw, r) - } else { - gzipHandler(next).ServeHTTP(rw, r) - } -} - -func gzipHandler(h http.Handler) http.Handler { - wrapper, err := gziphandler.GzipHandlerWithOpts( - gziphandler.CompressionLevel(gzip.DefaultCompression), - gziphandler.MinSize(gziphandler.DefaultMinSize)) - if err != nil { - log.Error(err) - } - return wrapper(h) -} diff --git a/old/middlewares/compress_test.go b/old/middlewares/compress_test.go deleted file mode 100644 index 06fe20a3c..000000000 --- a/old/middlewares/compress_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package middlewares - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/NYTimes/gziphandler" - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/negroni" -) - -const ( - acceptEncodingHeader = "Accept-Encoding" - contentEncodingHeader = "Content-Encoding" - contentTypeHeader = "Content-Type" - varyHeader = "Vary" - gzipValue = "gzip" -) - -func TestShouldCompressWhenNoContentEncodingHeader(t *testing.T) { - handler := &Compress{} - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - - baseBody := generateBytes(gziphandler.DefaultMinSize) - next := func(rw http.ResponseWriter, r *http.Request) { - _, err := rw.Write(baseBody) - assert.NoError(t, err) - } - - rw := httptest.NewRecorder() - handler.ServeHTTP(rw, req, next) - - assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader)) - assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader)) - - if assert.ObjectsAreEqualValues(rw.Body.Bytes(), baseBody) { - assert.Fail(t, "expected a compressed body", "got %v", rw.Body.Bytes()) - } -} - -func TestShouldNotCompressWhenContentEncodingHeader(t *testing.T) { - handler := &Compress{} - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - - fakeCompressedBody := generateBytes(gziphandler.DefaultMinSize) - next := func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add(contentEncodingHeader, 
gzipValue) - rw.Header().Add(varyHeader, acceptEncodingHeader) - rw.Write(fakeCompressedBody) - } - - rw := httptest.NewRecorder() - handler.ServeHTTP(rw, req, next) - - assert.Equal(t, gzipValue, rw.Header().Get(contentEncodingHeader)) - assert.Equal(t, acceptEncodingHeader, rw.Header().Get(varyHeader)) - - assert.EqualValues(t, rw.Body.Bytes(), fakeCompressedBody) -} - -func TestShouldNotCompressWhenNoAcceptEncodingHeader(t *testing.T) { - handler := &Compress{} - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) - - fakeBody := generateBytes(gziphandler.DefaultMinSize) - next := func(rw http.ResponseWriter, r *http.Request) { - rw.Write(fakeBody) - } - - rw := httptest.NewRecorder() - handler.ServeHTTP(rw, req, next) - - assert.Empty(t, rw.Header().Get(contentEncodingHeader)) - assert.EqualValues(t, rw.Body.Bytes(), fakeBody) -} - -func TestShouldNotCompressWhenGRPC(t *testing.T) { - handler := &Compress{} - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - req.Header.Add(contentTypeHeader, "application/grpc") - - baseBody := generateBytes(gziphandler.DefaultMinSize) - next := func(rw http.ResponseWriter, r *http.Request) { - rw.Write(baseBody) - } - - rw := httptest.NewRecorder() - handler.ServeHTTP(rw, req, next) - - assert.Empty(t, rw.Header().Get(acceptEncodingHeader)) - assert.Empty(t, rw.Header().Get(contentEncodingHeader)) - assert.EqualValues(t, rw.Body.Bytes(), baseBody) -} - -func TestIntegrationShouldNotCompress(t *testing.T) { - fakeCompressedBody := generateBytes(100000) - comp := &Compress{} - - testCases := []struct { - name string - handler func(rw http.ResponseWriter, r *http.Request) - expectedStatusCode int - }{ - { - name: "when content already compressed", - handler: func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add(contentEncodingHeader, gzipValue) - rw.Header().Add(varyHeader, acceptEncodingHeader) - rw.Write(fakeCompressedBody) - }, - expectedStatusCode: http.StatusOK, - }, - { - name: "when content already compressed and status code Created", - handler: func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add(contentEncodingHeader, gzipValue) - rw.Header().Add(varyHeader, acceptEncodingHeader) - rw.WriteHeader(http.StatusCreated) - rw.Write(fakeCompressedBody) - }, - expectedStatusCode: http.StatusCreated, - }, - } - - for _, test := range testCases { - - t.Run(test.name, func(t *testing.T) { - negro := negroni.New(comp) - negro.UseHandlerFunc(test.handler) - ts := httptest.NewServer(negro) - defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - - assert.Equal(t, test.expectedStatusCode, resp.StatusCode) - - assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) - assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) - - body, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - assert.EqualValues(t, fakeCompressedBody, body) - }) - } -} - -func TestShouldWriteHeaderWhenFlush(t *testing.T) { - comp := &Compress{} - negro := negroni.New(comp) - negro.UseHandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Add(contentEncodingHeader, gzipValue) - rw.Header().Add(varyHeader, acceptEncodingHeader) - rw.WriteHeader(http.StatusUnauthorized) - rw.(http.Flusher).Flush() - rw.Write([]byte("short")) - }) - ts := httptest.NewServer(negro) - 
defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - - assert.Equal(t, http.StatusUnauthorized, resp.StatusCode) - - assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) - assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) -} - -func TestIntegrationShouldCompress(t *testing.T) { - fakeBody := generateBytes(100000) - - testCases := []struct { - name string - handler func(rw http.ResponseWriter, r *http.Request) - expectedStatusCode int - }{ - { - name: "when AcceptEncoding header is present", - handler: func(rw http.ResponseWriter, r *http.Request) { - rw.Write(fakeBody) - }, - expectedStatusCode: http.StatusOK, - }, - { - name: "when AcceptEncoding header is present and status code Created", - handler: func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusCreated) - rw.Write(fakeBody) - }, - expectedStatusCode: http.StatusCreated, - }, - } - - for _, test := range testCases { - - t.Run(test.name, func(t *testing.T) { - comp := &Compress{} - - negro := negroni.New(comp) - negro.UseHandlerFunc(test.handler) - ts := httptest.NewServer(negro) - defer ts.Close() - - req := testhelpers.MustNewRequest(http.MethodGet, ts.URL, nil) - req.Header.Add(acceptEncodingHeader, gzipValue) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - - assert.Equal(t, test.expectedStatusCode, resp.StatusCode) - - assert.Equal(t, gzipValue, resp.Header.Get(contentEncodingHeader)) - assert.Equal(t, acceptEncodingHeader, resp.Header.Get(varyHeader)) - - body, err := ioutil.ReadAll(resp.Body) - require.NoError(t, err) - if assert.ObjectsAreEqualValues(body, fakeBody) { - assert.Fail(t, "expected a compressed body", "got %v", body) - } - }) - } -} - -func generateBytes(len int) []byte { - var value []byte - for i := 0; i < len; i++ { - value = append(value, 0x61+byte(i)) - } - return value -} diff --git a/old/middlewares/empty_backend_handler.go b/old/middlewares/empty_backend_handler.go deleted file mode 100644 index 7e6fe1196..000000000 --- a/old/middlewares/empty_backend_handler.go +++ /dev/null @@ -1,30 +0,0 @@ -package middlewares - -import ( - "net/http" - - "github.com/containous/traefik/pkg/healthcheck" -) - -// EmptyBackendHandler is a middlware that checks whether the current Backend -// has at least one active Server in respect to the healthchecks and if this -// is not the case, it will stop the middleware chain and respond with 503. -type EmptyBackendHandler struct { - next healthcheck.BalancerHandler -} - -// NewEmptyBackendHandler creates a new EmptyBackendHandler instance. -func NewEmptyBackendHandler(lb healthcheck.BalancerHandler) *EmptyBackendHandler { - return &EmptyBackendHandler{next: lb} -} - -// ServeHTTP responds with 503 when there is no active Server and otherwise -// invokes the next handler in the middleware chain. 
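// The Compress middleware shown above is a thin wrapper around
// NYTimes/gziphandler; a sketch of building the same wrapper directly, with
// the options used by the deleted gzipHandler helper, looks like this.
package example

import (
	"compress/gzip"
	"net/http"

	"github.com/NYTimes/gziphandler"
)

func newGzipHandler(next http.Handler) (http.Handler, error) {
	wrapper, err := gziphandler.GzipHandlerWithOpts(
		gziphandler.CompressionLevel(gzip.DefaultCompression),
		gziphandler.MinSize(gziphandler.DefaultMinSize))
	if err != nil {
		return nil, err
	}
	return wrapper(next), nil
}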
-func (h *EmptyBackendHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - if len(h.next.Servers()) == 0 { - rw.WriteHeader(http.StatusServiceUnavailable) - rw.Write([]byte(http.StatusText(http.StatusServiceUnavailable))) - } else { - h.next.ServeHTTP(rw, r) - } -} diff --git a/old/middlewares/empty_backend_handler_test.go b/old/middlewares/empty_backend_handler_test.go deleted file mode 100644 index 224e68a98..000000000 --- a/old/middlewares/empty_backend_handler_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package middlewares - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/vulcand/oxy/roundrobin" -) - -func TestEmptyBackendHandler(t *testing.T) { - tests := []struct { - amountServer int - wantStatusCode int - }{ - { - amountServer: 0, - wantStatusCode: http.StatusServiceUnavailable, - }, - { - amountServer: 1, - wantStatusCode: http.StatusOK, - }, - } - - for _, test := range tests { - test := test - - t.Run(fmt.Sprintf("amount servers %d", test.amountServer), func(t *testing.T) { - t.Parallel() - - handler := NewEmptyBackendHandler(&healthCheckLoadBalancer{test.amountServer}) - - recorder := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, "http://localhost", nil) - - handler.ServeHTTP(recorder, req) - - if recorder.Result().StatusCode != test.wantStatusCode { - t.Errorf("Received status code %d, wanted %d", recorder.Result().StatusCode, test.wantStatusCode) - } - }) - } -} - -type healthCheckLoadBalancer struct { - amountServer int -} - -func (lb *healthCheckLoadBalancer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) -} - -func (lb *healthCheckLoadBalancer) Servers() []*url.URL { - servers := make([]*url.URL, lb.amountServer) - for i := 0; i < lb.amountServer; i++ { - servers = append(servers, testhelpers.MustParseURL("http://localhost")) - } - return servers -} - -func (lb *healthCheckLoadBalancer) RemoveServer(u *url.URL) error { - return nil -} - -func (lb *healthCheckLoadBalancer) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error { - return nil -} - -func (lb *healthCheckLoadBalancer) ServerWeight(u *url.URL) (int, bool) { - return 0, false -} - -func (lb *healthCheckLoadBalancer) NextServer() (*url.URL, error) { - return nil, nil -} - -func (lb *healthCheckLoadBalancer) Next() http.Handler { - return nil -} diff --git a/old/middlewares/errorpages/error_pages.go b/old/middlewares/errorpages/error_pages.go deleted file mode 100644 index a5dcd584b..000000000 --- a/old/middlewares/errorpages/error_pages.go +++ /dev/null @@ -1,236 +0,0 @@ -package errorpages - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares" - "github.com/containous/traefik/old/types" - "github.com/vulcand/oxy/forward" - "github.com/vulcand/oxy/utils" -) - -// Compile time validation that the response recorder implements http interfaces correctly. 
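// A standard-library sketch of the guard EmptyBackendHandler implements: when
// the balancer reports no healthy servers, answer 503 instead of forwarding.
// The serverLister interface is a reduced illustration of
// healthcheck.BalancerHandler, not its real definition.
package example

import (
	"net/http"
	"net/url"
)

type serverLister interface {
	http.Handler
	Servers() []*url.URL
}

func guardEmptyBackend(lb serverLister) http.Handler {
	return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		if len(lb.Servers()) == 0 {
			http.Error(rw, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
			return
		}
		lb.ServeHTTP(rw, r)
	})
}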
-var _ middlewares.Stateful = &responseRecorderWithCloseNotify{} - -// Handler is a middleware that provides the custom error pages -type Handler struct { - BackendName string - backendHandler http.Handler - httpCodeRanges types.HTTPCodeRanges - backendURL string - backendQuery string - FallbackURL string // Deprecated -} - -// NewHandler initializes the utils.ErrorHandler for the custom error pages -func NewHandler(errorPage *types.ErrorPage, backendName string) (*Handler, error) { - if len(backendName) == 0 { - return nil, errors.New("error pages: backend name is mandatory ") - } - - httpCodeRanges, err := types.NewHTTPCodeRanges(errorPage.Status) - if err != nil { - return nil, err - } - - return &Handler{ - BackendName: backendName, - httpCodeRanges: httpCodeRanges, - backendQuery: errorPage.Query, - backendURL: "http://0.0.0.0", - }, nil -} - -// PostLoad adds backend handler if available -func (h *Handler) PostLoad(backendHandler http.Handler) error { - if backendHandler == nil { - fwd, err := forward.New() - if err != nil { - return err - } - - h.backendHandler = fwd - h.backendURL = h.FallbackURL - } else { - h.backendHandler = backendHandler - } - - return nil -} - -func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) { - if h.backendHandler == nil { - log.Error("Error pages: no backend handler.") - next.ServeHTTP(w, req) - return - } - - recorder := newResponseRecorder(w) - next.ServeHTTP(recorder, req) - - // check the recorder code against the configured http status code ranges - for _, block := range h.httpCodeRanges { - if recorder.GetCode() >= block[0] && recorder.GetCode() <= block[1] { - log.Errorf("Caught HTTP Status Code %d, returning error page", recorder.GetCode()) - - var query string - if len(h.backendQuery) > 0 { - query = "/" + strings.TrimPrefix(h.backendQuery, "/") - query = strings.Replace(query, "{status}", strconv.Itoa(recorder.GetCode()), -1) - } - - pageReq, err := newRequest(h.backendURL + query) - if err != nil { - log.Error(err) - w.WriteHeader(recorder.GetCode()) - fmt.Fprint(w, http.StatusText(recorder.GetCode())) - return - } - - recorderErrorPage := newResponseRecorder(w) - utils.CopyHeaders(pageReq.Header, req.Header) - - h.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context())) - - utils.CopyHeaders(w.Header(), recorderErrorPage.Header()) - w.WriteHeader(recorder.GetCode()) - - if _, err = w.Write(recorderErrorPage.GetBody().Bytes()); err != nil { - log.Error(err) - } - return - } - } - - // did not catch a configured status code so proceed with the request - utils.CopyHeaders(w.Header(), recorder.Header()) - w.WriteHeader(recorder.GetCode()) - w.Write(recorder.GetBody().Bytes()) -} - -func newRequest(baseURL string) (*http.Request, error) { - u, err := url.Parse(baseURL) - if err != nil { - return nil, fmt.Errorf("error pages: error when parse URL: %v", err) - } - - req, err := http.NewRequest(http.MethodGet, u.String(), http.NoBody) - if err != nil { - return nil, fmt.Errorf("error pages: error when create query: %v", err) - } - - req.RequestURI = u.RequestURI() - return req, nil -} - -type responseRecorder interface { - http.ResponseWriter - http.Flusher - GetCode() int - GetBody() *bytes.Buffer - IsStreamingResponseStarted() bool -} - -// newResponseRecorder returns an initialized responseRecorder. 
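// A minimal sketch of the two pieces of error-page plumbing above: matching
// the recorded status code against the configured ranges and substituting it
// into the backend query. Range parsing (types.NewHTTPCodeRanges) is omitted;
// representing the ranges as [][2]int is an assumption made for the example.
package example

import (
	"strconv"
	"strings"
)

func errorPageQuery(ranges [][2]int, code int, query string) (string, bool) {
	for _, block := range ranges {
		if code >= block[0] && code <= block[1] {
			q := "/" + strings.TrimPrefix(query, "/")
			return strings.Replace(q, "{status}", strconv.Itoa(code), -1), true
		}
	}
	return "", false
}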
-func newResponseRecorder(rw http.ResponseWriter) responseRecorder { - recorder := &responseRecorderWithoutCloseNotify{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - Code: http.StatusOK, - responseWriter: rw, - } - if _, ok := rw.(http.CloseNotifier); ok { - return &responseRecorderWithCloseNotify{recorder} - } - return recorder -} - -// responseRecorderWithoutCloseNotify is an implementation of http.ResponseWriter that -// records its mutations for later inspection. -type responseRecorderWithoutCloseNotify struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - - responseWriter http.ResponseWriter - err error - streamingResponseStarted bool -} - -type responseRecorderWithCloseNotify struct { - *responseRecorderWithoutCloseNotify -} - -// CloseNotify returns a channel that receives at most a -// single value (true) when the client connection has gone away. -func (r *responseRecorderWithCloseNotify) CloseNotify() <-chan bool { - return r.responseWriter.(http.CloseNotifier).CloseNotify() -} - -// Header returns the response headers. -func (r *responseRecorderWithoutCloseNotify) Header() http.Header { - if r.HeaderMap == nil { - r.HeaderMap = make(http.Header) - } - - return r.HeaderMap -} - -func (r *responseRecorderWithoutCloseNotify) GetCode() int { - return r.Code -} - -func (r *responseRecorderWithoutCloseNotify) GetBody() *bytes.Buffer { - return r.Body -} - -func (r *responseRecorderWithoutCloseNotify) IsStreamingResponseStarted() bool { - return r.streamingResponseStarted -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (r *responseRecorderWithoutCloseNotify) Write(buf []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - return r.Body.Write(buf) -} - -// WriteHeader sets rw.Code. -func (r *responseRecorderWithoutCloseNotify) WriteHeader(code int) { - r.Code = code -} - -// Hijack hijacks the connection -func (r *responseRecorderWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return r.responseWriter.(http.Hijacker).Hijack() -} - -// Flush sends any buffered data to the client. 
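// The first call to Flush below is what switches the recorder into streaming
// mode: the recorded headers and status code are copied to the underlying
// ResponseWriter exactly once, and from then on the buffered body is written
// out and reset on every Flush.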
-func (r *responseRecorderWithoutCloseNotify) Flush() { - if !r.streamingResponseStarted { - utils.CopyHeaders(r.responseWriter.Header(), r.Header()) - r.responseWriter.WriteHeader(r.Code) - r.streamingResponseStarted = true - } - - _, err := r.responseWriter.Write(r.Body.Bytes()) - if err != nil { - log.Errorf("Error writing response in responseRecorder: %v", err) - r.err = err - } - r.Body.Reset() - - if flusher, ok := r.responseWriter.(http.Flusher); ok { - flusher.Flush() - } -} diff --git a/old/middlewares/errorpages/error_pages_test.go b/old/middlewares/errorpages/error_pages_test.go deleted file mode 100644 index 20623c97a..000000000 --- a/old/middlewares/errorpages/error_pages_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package errorpages - -import ( - "fmt" - "net/http" - "net/http/httptest" - "strconv" - "testing" - - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/negroni" -) - -func TestHandler(t *testing.T) { - testCases := []struct { - desc string - errorPage *types.ErrorPage - backendCode int - backendErrorHandler http.HandlerFunc - validate func(t *testing.T, recorder *httptest.ResponseRecorder) - }{ - { - desc: "no error", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusOK, - backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusOK)) - }, - }, - { - desc: "in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusInternalServerError, - backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusInternalServerError, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My error page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - { - desc: "not in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusBadGateway, - backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusBadGateway, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway)) - assert.NotContains(t, recorder.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code") - }, - }, - { - desc: "query replacement", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}}, - backendCode: http.StatusServiceUnavailable, - backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/503" { - fmt.Fprintln(w, "My 503 page.") - } else { - fmt.Fprintln(w, "Failed") - } - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, 
http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - { - desc: "Single code", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}}, - backendCode: http.StatusServiceUnavailable, - backendErrorHandler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/503" { - fmt.Fprintln(w, "My 503 page.") - } else { - fmt.Fprintln(w, "Failed") - } - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - errorPageHandler, err := NewHandler(test.errorPage, "test") - require.NoError(t, err) - - errorPageHandler.backendHandler = test.backendErrorHandler - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(test.backendCode) - fmt.Fprintln(w, http.StatusText(test.backendCode)) - }) - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost/test", nil) - - n := negroni.New() - n.Use(errorPageHandler) - n.UseHandler(handler) - - recorder := httptest.NewRecorder() - n.ServeHTTP(recorder, req) - - test.validate(t, recorder) - }) - } -} - -func TestHandlerOldWay(t *testing.T) { - testCases := []struct { - desc string - errorPage *types.ErrorPage - backendCode int - errorPageForwarder http.HandlerFunc - validate func(t *testing.T, recorder *httptest.ResponseRecorder) - }{ - { - desc: "no error", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusOK, - errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "OK") - }, - }, - { - desc: "in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusInternalServerError, - errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusInternalServerError, recorder.Code) - assert.Contains(t, recorder.Body.String(), "My error page.") - assert.NotContains(t, recorder.Body.String(), http.StatusText(http.StatusInternalServerError), "Should not return the oops page") - }, - }, - { - desc: "not in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusBadGateway, - errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "My error page.") - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusBadGateway, recorder.Code) - assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway)) - assert.NotContains(t, recorder.Body.String(), "My error page.", 
"Should return the oops page since we have not configured the 502 code") - }, - }, - { - desc: "query replacement", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}}, - backendCode: http.StatusServiceUnavailable, - errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.RequestURI() == "/"+strconv.Itoa(503) { - fmt.Fprintln(w, "My 503 page.") - } else { - fmt.Fprintln(w, "Failed") - } - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - { - desc: "Single code", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}}, - backendCode: http.StatusServiceUnavailable, - errorPageForwarder: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.RequestURI() == "/"+strconv.Itoa(503) { - fmt.Fprintln(w, "My 503 page.") - } else { - fmt.Fprintln(w, "Failed") - } - }), - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - } - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost/test", nil) - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - errorPageHandler, err := NewHandler(test.errorPage, "test") - require.NoError(t, err) - errorPageHandler.FallbackURL = "http://localhost" - - err = errorPageHandler.PostLoad(test.errorPageForwarder) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(test.backendCode) - fmt.Fprintln(w, http.StatusText(test.backendCode)) - }) - - n := negroni.New() - n.Use(errorPageHandler) - n.UseHandler(handler) - - recorder := httptest.NewRecorder() - n.ServeHTTP(recorder, req) - - test.validate(t, recorder) - }) - } -} - -func TestHandlerOldWayIntegration(t *testing.T) { - errorPagesServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.RequestURI() == "/503" { - fmt.Fprintln(w, "My 503 page.") - } else { - fmt.Fprintln(w, "Test Server") - } - })) - defer errorPagesServer.Close() - - testCases := []struct { - desc string - errorPage *types.ErrorPage - backendCode int - validate func(t *testing.T, recorder *httptest.ResponseRecorder) - }{ - { - desc: "no error", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusOK, - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusOK, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "OK") - }, - }, - { - desc: "in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusInternalServerError, - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusInternalServerError, recorder.Code) - assert.Contains(t, recorder.Body.String(), "Test Server") - assert.NotContains(t, recorder.Body.String(), 
http.StatusText(http.StatusInternalServerError), "Should not return the oops page") - }, - }, - { - desc: "not in the range", - errorPage: &types.ErrorPage{Backend: "error", Query: "/test", Status: []string{"500-501", "503-599"}}, - backendCode: http.StatusBadGateway, - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusBadGateway, recorder.Code) - assert.Contains(t, recorder.Body.String(), http.StatusText(http.StatusBadGateway)) - assert.NotContains(t, recorder.Body.String(), "Test Server", "Should return the oops page since we have not configured the 502 code") - }, - }, - { - desc: "query replacement", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503-503"}}, - backendCode: http.StatusServiceUnavailable, - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - { - desc: "Single code", - errorPage: &types.ErrorPage{Backend: "error", Query: "/{status}", Status: []string{"503"}}, - backendCode: http.StatusServiceUnavailable, - validate: func(t *testing.T, recorder *httptest.ResponseRecorder) { - assert.Equal(t, http.StatusServiceUnavailable, recorder.Code, "HTTP status") - assert.Contains(t, recorder.Body.String(), "My 503 page.") - assert.NotContains(t, recorder.Body.String(), "oops", "Should not return the oops page") - }, - }, - } - - req := testhelpers.MustNewRequest(http.MethodGet, errorPagesServer.URL+"/test", nil) - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - - errorPageHandler, err := NewHandler(test.errorPage, "test") - require.NoError(t, err) - errorPageHandler.FallbackURL = errorPagesServer.URL - - err = errorPageHandler.PostLoad(nil) - require.NoError(t, err) - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(test.backendCode) - fmt.Fprintln(w, http.StatusText(test.backendCode)) - }) - - n := negroni.New() - n.Use(errorPageHandler) - n.UseHandler(handler) - - recorder := httptest.NewRecorder() - n.ServeHTTP(recorder, req) - - test.validate(t, recorder) - }) - } -} - -func TestNewResponseRecorder(t *testing.T) { - testCases := []struct { - desc string - rw http.ResponseWriter - expected http.ResponseWriter - }{ - { - desc: "Without Close Notify", - rw: httptest.NewRecorder(), - expected: &responseRecorderWithoutCloseNotify{}, - }, - { - desc: "With Close Notify", - rw: &mockRWCloseNotify{}, - expected: &responseRecorderWithCloseNotify{}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - rec := newResponseRecorder(test.rw) - - assert.IsType(t, rec, test.expected) - }) - } -} - -type mockRWCloseNotify struct{} - -func (m *mockRWCloseNotify) CloseNotify() <-chan bool { - panic("implement me") -} - -func (m *mockRWCloseNotify) Header() http.Header { - panic("implement me") -} - -func (m *mockRWCloseNotify) Write([]byte) (int, error) { - panic("implement me") -} - -func (m *mockRWCloseNotify) WriteHeader(int) { - panic("implement me") -} diff --git a/old/middlewares/forwardedheaders/forwarded_header.go b/old/middlewares/forwardedheaders/forwarded_header.go deleted file mode 100644 index 7584e1052..000000000 --- a/old/middlewares/forwardedheaders/forwarded_header.go +++ /dev/null @@ -1,52 
+0,0 @@ -package forwardedheaders - -import ( - "net/http" - - "github.com/containous/traefik/pkg/ip" - "github.com/vulcand/oxy/forward" - "github.com/vulcand/oxy/utils" -) - -// XForwarded filter for XForwarded headers -type XForwarded struct { - insecure bool - trustedIps []string - ipChecker *ip.Checker -} - -// NewXforwarded creates a new XForwarded -func NewXforwarded(insecure bool, trustedIps []string) (*XForwarded, error) { - var ipChecker *ip.Checker - if len(trustedIps) > 0 { - var err error - ipChecker, err = ip.NewChecker(trustedIps) - if err != nil { - return nil, err - } - } - - return &XForwarded{ - insecure: insecure, - trustedIps: trustedIps, - ipChecker: ipChecker, - }, nil -} - -func (x *XForwarded) isTrustedIP(ip string) bool { - if x.ipChecker == nil { - return false - } - return x.ipChecker.IsAuthorized(ip) == nil -} - -func (x *XForwarded) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if !x.insecure && !x.isTrustedIP(r.RemoteAddr) { - utils.RemoveHeaders(r.Header, forward.XHeaders...) - } - - // If there is a next, call it. - if next != nil { - next(w, r) - } -} diff --git a/old/middlewares/forwardedheaders/forwarded_header_test.go b/old/middlewares/forwardedheaders/forwarded_header_test.go deleted file mode 100644 index f59107798..000000000 --- a/old/middlewares/forwardedheaders/forwarded_header_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package forwardedheaders - -import ( - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestServeHTTP(t *testing.T) { - testCases := []struct { - desc string - insecure bool - trustedIps []string - incomingHeaders map[string]string - remoteAddr string - expectedHeaders map[string]string - }{ - { - desc: "all Empty", - insecure: true, - trustedIps: nil, - remoteAddr: "", - incomingHeaders: map[string]string{}, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "", - }, - }, - { - desc: "insecure true with incoming X-Forwarded-For", - insecure: true, - trustedIps: nil, - remoteAddr: "", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - }, - { - desc: "insecure false with incoming X-Forwarded-For", - insecure: false, - trustedIps: nil, - remoteAddr: "", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "", - }, - }, - { - desc: "insecure false with incoming X-Forwarded-For and valid Trusted Ips", - insecure: false, - trustedIps: []string{"10.0.1.100"}, - remoteAddr: "10.0.1.100:80", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - }, - { - desc: "insecure false with incoming X-Forwarded-For and invalid Trusted Ips", - insecure: false, - trustedIps: []string{"10.0.1.100"}, - remoteAddr: "10.0.1.101:80", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "", - }, - }, - { - desc: "insecure false with incoming X-Forwarded-For and valid Trusted Ips CIDR", - insecure: false, - trustedIps: []string{"1.2.3.4/24"}, - remoteAddr: "1.2.3.156:80", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 
10.0.1.12", - }, - }, - { - desc: "insecure false with incoming X-Forwarded-For and invalid Trusted Ips CIDR", - insecure: false, - trustedIps: []string{"1.2.3.4/24"}, - remoteAddr: "10.0.1.101:80", - incomingHeaders: map[string]string{ - "X-Forwarded-for": "10.0.1.0, 10.0.1.12", - }, - expectedHeaders: map[string]string{ - "X-Forwarded-for": "", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - req, err := http.NewRequest(http.MethodGet, "", nil) - require.NoError(t, err) - - req.RemoteAddr = test.remoteAddr - - for k, v := range test.incomingHeaders { - req.Header.Set(k, v) - } - - m, err := NewXforwarded(test.insecure, test.trustedIps) - require.NoError(t, err) - - m.ServeHTTP(nil, req, nil) - - for k, v := range test.expectedHeaders { - assert.Equal(t, v, req.Header.Get(k)) - } - }) - } -} diff --git a/old/middlewares/handlerSwitcher.go b/old/middlewares/handlerSwitcher.go deleted file mode 100644 index bbc045107..000000000 --- a/old/middlewares/handlerSwitcher.go +++ /dev/null @@ -1,36 +0,0 @@ -package middlewares - -import ( - "net/http" - - "github.com/containous/mux" - "github.com/containous/traefik/pkg/safe" -) - -// HandlerSwitcher allows hot switching of http.ServeMux -type HandlerSwitcher struct { - handler *safe.Safe -} - -// NewHandlerSwitcher builds a new instance of HandlerSwitcher -func NewHandlerSwitcher(newHandler *mux.Router) (hs *HandlerSwitcher) { - return &HandlerSwitcher{ - handler: safe.New(newHandler), - } -} - -func (hs *HandlerSwitcher) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - handlerBackup := hs.handler.Get().(*mux.Router) - handlerBackup.ServeHTTP(rw, r) -} - -// GetHandler returns the current http.ServeMux -func (hs *HandlerSwitcher) GetHandler() (newHandler *mux.Router) { - handler := hs.handler.Get().(*mux.Router) - return handler -} - -// UpdateHandler safely updates the current http.ServeMux with a new one -func (hs *HandlerSwitcher) UpdateHandler(newHandler *mux.Router) { - hs.handler.Set(newHandler) -} diff --git a/old/middlewares/headers.go b/old/middlewares/headers.go deleted file mode 100644 index dee13fc5c..000000000 --- a/old/middlewares/headers.go +++ /dev/null @@ -1,71 +0,0 @@ -package middlewares - -// Middleware based on https://github.com/unrolled/secure - -import ( - "net/http" - - "github.com/containous/traefik/old/types" -) - -// HeaderOptions is a struct for specifying configuration options for the headers middleware. -type HeaderOptions struct { - // If Custom request headers are set, these will be added to the request - CustomRequestHeaders map[string]string - // If Custom response headers are set, these will be added to the ResponseWriter - CustomResponseHeaders map[string]string -} - -// HeaderStruct is a middleware that helps setup a few basic security features. A single headerOptions struct can be -// provided to configure which features should be enabled, and the ability to override a few of the default values. -type HeaderStruct struct { - // Customize headers with a headerOptions struct. - opt HeaderOptions -} - -// NewHeaderFromStruct constructs a new header instance from supplied frontend header struct. 
-func NewHeaderFromStruct(headers *types.Headers) *HeaderStruct { - if headers == nil || !headers.HasCustomHeadersDefined() { - return nil - } - - return &HeaderStruct{ - opt: HeaderOptions{ - CustomRequestHeaders: headers.CustomRequestHeaders, - CustomResponseHeaders: headers.CustomResponseHeaders, - }, - } -} - -func (s *HeaderStruct) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - s.ModifyRequestHeaders(r) - // If there is a next, call it. - if next != nil { - next(w, r) - } -} - -// ModifyRequestHeaders set or delete request headers -func (s *HeaderStruct) ModifyRequestHeaders(r *http.Request) { - // Loop through Custom request headers - for header, value := range s.opt.CustomRequestHeaders { - if value == "" { - r.Header.Del(header) - } else { - r.Header.Set(header, value) - } - } -} - -// ModifyResponseHeaders set or delete response headers -func (s *HeaderStruct) ModifyResponseHeaders(res *http.Response) error { - // Loop through Custom response headers - for header, value := range s.opt.CustomResponseHeaders { - if value == "" { - res.Header.Del(header) - } else { - res.Header.Set(header, value) - } - } - return nil -} diff --git a/old/middlewares/headers_test.go b/old/middlewares/headers_test.go deleted file mode 100644 index e88495eb8..000000000 --- a/old/middlewares/headers_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package middlewares - -// Middleware tests based on https://github.com/unrolled/secure - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -var myHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("bar")) -}) - -// newHeader constructs a new header instance with supplied options. 
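// Illustrative sketch for the header middleware removed above (assumed wiring
// and header names, not taken from the deleted sources): NewHeaderFromStruct
// returns nil unless custom headers are defined, and an empty value removes a
// request/response header instead of setting it.
//
//    headers := &types.Headers{
//        CustomRequestHeaders:  map[string]string{"X-Script-Name": "traefik", "X-Remove-Me": ""},
//        CustomResponseHeaders: map[string]string{"X-Served-By": "traefik"},
//    }
//    if h := middlewares.NewHeaderFromStruct(headers); h != nil {
//        n := negroni.New(h)   // HeaderStruct satisfies the negroni.Handler signature
//        n.UseHandler(backend) // backend is a hypothetical http.Handler
//    }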
-func newHeader(options ...HeaderOptions) *HeaderStruct { - var opt HeaderOptions - if len(options) == 0 { - opt = HeaderOptions{} - } else { - opt = options[0] - } - - return &HeaderStruct{opt: opt} -} - -func TestNoConfig(t *testing.T) { - header := newHeader() - - res := httptest.NewRecorder() - req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil) - - header.ServeHTTP(res, req, myHandler) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "bar", res.Body.String(), "Body not the expected") -} - -func TestModifyResponseHeaders(t *testing.T) { - header := newHeader(HeaderOptions{ - CustomResponseHeaders: map[string]string{ - "X-Custom-Response-Header": "test_response", - }, - }) - - res := httptest.NewRecorder() - res.HeaderMap.Add("X-Custom-Response-Header", "test_response") - - err := header.ModifyResponseHeaders(res.Result()) - assert.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "test_response", res.Header().Get("X-Custom-Response-Header"), "Did not get expected header") - - res = httptest.NewRecorder() - res.HeaderMap.Add("X-Custom-Response-Header", "") - - err = header.ModifyResponseHeaders(res.Result()) - assert.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "", res.Header().Get("X-Custom-Response-Header"), "Did not get expected header") - - res = httptest.NewRecorder() - res.HeaderMap.Add("X-Custom-Response-Header", "test_override") - - err = header.ModifyResponseHeaders(res.Result()) - assert.NoError(t, err) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "test_override", res.Header().Get("X-Custom-Response-Header"), "Did not get expected header") -} - -func TestCustomRequestHeader(t *testing.T) { - header := newHeader(HeaderOptions{ - CustomRequestHeaders: map[string]string{ - "X-Custom-Request-Header": "test_request", - }, - }) - - res := httptest.NewRecorder() - req := testhelpers.MustNewRequest(http.MethodGet, "/foo", nil) - - header.ServeHTTP(res, req, nil) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "test_request", req.Header.Get("X-Custom-Request-Header"), "Did not get expected header") -} - -func TestCustomRequestHeaderEmptyValue(t *testing.T) { - header := newHeader(HeaderOptions{ - CustomRequestHeaders: map[string]string{ - "X-Custom-Request-Header": "test_request", - }, - }) - - res := httptest.NewRecorder() - req := testhelpers.MustNewRequest(http.MethodGet, "/foo", nil) - - header.ServeHTTP(res, req, nil) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "test_request", req.Header.Get("X-Custom-Request-Header"), "Did not get expected header") - - header = newHeader(HeaderOptions{ - CustomRequestHeaders: map[string]string{ - "X-Custom-Request-Header": "", - }, - }) - - header.ServeHTTP(res, req, nil) - - assert.Equal(t, http.StatusOK, res.Code, "Status not OK") - assert.Equal(t, "", req.Header.Get("X-Custom-Request-Header"), "This header is not expected") -} diff --git a/old/middlewares/ip_whitelister.go b/old/middlewares/ip_whitelister.go deleted file mode 100644 index 61f29d9a1..000000000 --- a/old/middlewares/ip_whitelister.go +++ /dev/null @@ -1,67 +0,0 @@ -package middlewares - -import ( - "fmt" - "net/http" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/tracing" - "github.com/containous/traefik/pkg/ip" - "github.com/pkg/errors" - "github.com/urfave/negroni" -) - -// 
IPWhiteLister is a middleware that provides Checks of the Requesting IP against a set of Whitelists -type IPWhiteLister struct { - handler negroni.Handler - whiteLister *ip.Checker - strategy ip.Strategy -} - -// NewIPWhiteLister builds a new IPWhiteLister given a list of CIDR-Strings to whitelist -func NewIPWhiteLister(whiteList []string, strategy ip.Strategy) (*IPWhiteLister, error) { - if len(whiteList) == 0 { - return nil, errors.New("no white list provided") - } - - checker, err := ip.NewChecker(whiteList) - if err != nil { - return nil, fmt.Errorf("parsing CIDR whitelist %s: %v", whiteList, err) - } - - whiteLister := IPWhiteLister{ - strategy: strategy, - whiteLister: checker, - } - - whiteLister.handler = negroni.HandlerFunc(whiteLister.handle) - log.Debugf("configured IP white list: %s", whiteList) - - return &whiteLister, nil -} - -func (wl *IPWhiteLister) handle(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - err := wl.whiteLister.IsAuthorized(wl.strategy.GetIP(r)) - if err != nil { - tracing.SetErrorAndDebugLog(r, "request %+v - rejecting: %v", r, err) - reject(w) - return - } - log.Debugf("Accept %s: %+v", wl.strategy.GetIP(r), r) - tracing.SetErrorAndDebugLog(r, "request %+v matched white list %v - passing", r, wl.whiteLister) - next.ServeHTTP(w, r) -} - -func (wl *IPWhiteLister) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - wl.handler.ServeHTTP(rw, r, next) -} - -func reject(w http.ResponseWriter) { - statusCode := http.StatusForbidden - - w.WriteHeader(statusCode) - _, err := w.Write([]byte(http.StatusText(statusCode))) - if err != nil { - log.Error(err) - } -} diff --git a/old/middlewares/ip_whitelister_test.go b/old/middlewares/ip_whitelister_test.go deleted file mode 100644 index 5f9eb5179..000000000 --- a/old/middlewares/ip_whitelister_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package middlewares - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/containous/traefik/pkg/ip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewIPWhiteLister(t *testing.T) { - testCases := []struct { - desc string - whiteList []string - expectedError string - }{ - { - desc: "invalid IP", - whiteList: []string{"foo"}, - expectedError: "parsing CIDR whitelist [foo]: parsing CIDR trusted IPs : invalid CIDR address: foo", - }, - { - desc: "valid IP", - whiteList: []string{"10.10.10.10"}, - expectedError: "", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - whiteLister, err := NewIPWhiteLister(test.whiteList, &ip.RemoteAddrStrategy{}) - - if len(test.expectedError) > 0 { - assert.EqualError(t, err, test.expectedError) - } else { - require.NoError(t, err) - assert.NotNil(t, whiteLister) - } - }) - } -} - -func TestIPWhiteLister_ServeHTTP(t *testing.T) { - testCases := []struct { - desc string - whiteList []string - remoteAddr string - expected int - }{ - { - desc: "authorized with remote address", - whiteList: []string{"20.20.20.20"}, - remoteAddr: "20.20.20.20:1234", - expected: 200, - }, - { - desc: "non authorized with remote address", - whiteList: []string{"20.20.20.20"}, - remoteAddr: "20.20.20.21:1234", - expected: 403, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - whiteLister, err := NewIPWhiteLister(test.whiteList, &ip.RemoteAddrStrategy{}) - require.NoError(t, err) - - recorder := httptest.NewRecorder() - - req := 
httptest.NewRequest(http.MethodGet, "http://10.10.10.10", nil) - - if len(test.remoteAddr) > 0 { - req.RemoteAddr = test.remoteAddr - } - - next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) - - whiteLister.ServeHTTP(recorder, req, next) - - assert.Equal(t, test.expected, recorder.Code) - }) - } -} diff --git a/old/middlewares/recover.go b/old/middlewares/recover.go deleted file mode 100644 index 916f77ec5..000000000 --- a/old/middlewares/recover.go +++ /dev/null @@ -1,51 +0,0 @@ -package middlewares - -import ( - "net/http" - "runtime" - - "github.com/containous/traefik/old/log" - "github.com/urfave/negroni" -) - -// RecoverHandler recovers from a panic in http handlers -func RecoverHandler(next http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - defer recoverFunc(w, r) - next.ServeHTTP(w, r) - } - return http.HandlerFunc(fn) -} - -// NegroniRecoverHandler recovers from a panic in negroni handlers -func NegroniRecoverHandler() negroni.Handler { - fn := func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - defer recoverFunc(w, r) - next.ServeHTTP(w, r) - } - return negroni.HandlerFunc(fn) -} - -func recoverFunc(w http.ResponseWriter, r *http.Request) { - if err := recover(); err != nil { - if !shouldLogPanic(err) { - log.Debugf("Request has been aborted [%s - %s]: %v", r.RemoteAddr, r.URL, err) - return - } - - log.Errorf("Recovered from panic in HTTP handler [%s - %s]: %+v", r.RemoteAddr, r.URL, err) - - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - log.Errorf("Stack: %s", buf) - - http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) - } -} - -// https://github.com/golang/go/blob/a0d6420d8be2ae7164797051ec74fa2a2df466a1/src/net/http/server.go#L1761-L1775 -// https://github.com/golang/go/blob/c33153f7b416c03983324b3e8f869ce1116d84bc/src/net/http/httputil/reverseproxy.go#L284 -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} diff --git a/old/middlewares/recover_test.go b/old/middlewares/recover_test.go deleted file mode 100644 index 3d5b70aa4..000000000 --- a/old/middlewares/recover_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package middlewares - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/urfave/negroni" -) - -func TestRecoverHandler(t *testing.T) { - fn := func(w http.ResponseWriter, r *http.Request) { - panic("I love panicing!") - } - recoverHandler := RecoverHandler(http.HandlerFunc(fn)) - server := httptest.NewServer(recoverHandler) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != http.StatusInternalServerError { - t.Fatalf("Received non-%d response: %d\n", http.StatusInternalServerError, resp.StatusCode) - } -} - -func TestNegroniRecoverHandler(t *testing.T) { - n := negroni.New() - n.Use(NegroniRecoverHandler()) - panicHandler := func(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - panic("I love panicing!") - } - n.UseFunc(negroni.HandlerFunc(panicHandler)) - server := httptest.NewServer(n) - defer server.Close() - - resp, err := http.Get(server.URL) - if err != nil { - t.Fatal(err) - } - if resp.StatusCode != http.StatusInternalServerError { - t.Fatalf("Received non-%d response: %d\n", http.StatusInternalServerError, resp.StatusCode) - } -} diff --git a/old/middlewares/redirect/redirect.go b/old/middlewares/redirect/redirect.go deleted 
file mode 100644 index 58ade5064..000000000 --- a/old/middlewares/redirect/redirect.go +++ /dev/null @@ -1,163 +0,0 @@ -package redirect - -import ( - "bytes" - "fmt" - "io" - "net/http" - "net/url" - "regexp" - "strings" - "text/template" - - "github.com/containous/traefik/old/configuration" - "github.com/containous/traefik/old/middlewares" - "github.com/urfave/negroni" - "github.com/vulcand/oxy/utils" -) - -const ( - defaultRedirectRegex = `^(?:https?:\/\/)?([\w\._-]+)(?::\d+)?(.*)$` -) - -// NewEntryPointHandler create a new redirection handler base on entry point -func NewEntryPointHandler(dstEntryPoint *configuration.EntryPoint, permanent bool) (negroni.Handler, error) { - exp := regexp.MustCompile(`(:\d+)`) - match := exp.FindStringSubmatch(dstEntryPoint.Address) - if len(match) == 0 { - return nil, fmt.Errorf("bad Address format %q", dstEntryPoint.Address) - } - - protocol := "http" - if dstEntryPoint.TLS != nil { - protocol = "https" - } - - replacement := protocol + "://${1}" + match[0] + "${2}" - - return NewRegexHandler(defaultRedirectRegex, replacement, permanent) -} - -// NewRegexHandler create a new redirection handler base on regex -func NewRegexHandler(exp string, replacement string, permanent bool) (negroni.Handler, error) { - re, err := regexp.Compile(exp) - if err != nil { - return nil, err - } - - return &handler{ - regexp: re, - replacement: replacement, - permanent: permanent, - errHandler: utils.DefaultHandler, - }, nil -} - -type handler struct { - regexp *regexp.Regexp - replacement string - permanent bool - errHandler utils.ErrorHandler -} - -func (h *handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { - oldURL := rawURL(req) - - // only continue if the Regexp param matches the URL - if !h.regexp.MatchString(oldURL) { - next.ServeHTTP(rw, req) - return - } - - // apply a rewrite regexp to the URL - newURL := h.regexp.ReplaceAllString(oldURL, h.replacement) - - // replace any variables that may be in there - rewrittenURL := &bytes.Buffer{} - if err := applyString(newURL, rewrittenURL, req); err != nil { - h.errHandler.ServeHTTP(rw, req, err) - return - } - - // parse the rewritten URL and replace request URL with it - parsedURL, err := url.Parse(rewrittenURL.String()) - if err != nil { - h.errHandler.ServeHTTP(rw, req, err) - return - } - - if stripPrefix, stripPrefixOk := req.Context().Value(middlewares.StripPrefixKey).(string); stripPrefixOk { - if len(stripPrefix) > 0 { - parsedURL.Path = stripPrefix - } - } - - if addPrefix, addPrefixOk := req.Context().Value(middlewares.AddPrefixKey).(string); addPrefixOk { - if len(addPrefix) > 0 { - parsedURL.Path = strings.Replace(parsedURL.Path, addPrefix, "", 1) - } - } - - if replacePath, replacePathOk := req.Context().Value(middlewares.ReplacePathKey).(string); replacePathOk { - if len(replacePath) > 0 { - parsedURL.Path = replacePath - } - } - - if newURL != oldURL { - handler := &moveHandler{location: parsedURL, permanent: h.permanent} - handler.ServeHTTP(rw, req) - return - } - - req.URL = parsedURL - - // make sure the request URI corresponds the rewritten URL - req.RequestURI = req.URL.RequestURI() - next.ServeHTTP(rw, req) -} - -type moveHandler struct { - location *url.URL - permanent bool -} - -func (m *moveHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) { - rw.Header().Set("Location", m.location.String()) - status := http.StatusFound - if m.permanent { - status = http.StatusMovedPermanently - } - rw.WriteHeader(status) - rw.Write([]byte(http.StatusText(status))) 
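// The redirect reply above is intentionally minimal: the Location header
// carries the target URL, m.permanent toggles between 301 (Moved Permanently)
// and 302 (Found), and the body is just the corresponding status text.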
-} - -func rawURL(request *http.Request) string { - scheme := "http" - if request.TLS != nil || isXForwardedHTTPS(request) { - scheme = "https" - } - - return strings.Join([]string{scheme, "://", request.Host, request.RequestURI}, "") -} - -func isXForwardedHTTPS(request *http.Request) bool { - xForwardedProto := request.Header.Get("X-Forwarded-Proto") - - return len(xForwardedProto) > 0 && xForwardedProto == "https" -} - -func applyString(in string, out io.Writer, request *http.Request) error { - t, err := template.New("t").Parse(in) - if err != nil { - return err - } - - data := struct { - Request *http.Request - }{ - Request: request, - } - - return t.Execute(out, data) -} diff --git a/old/middlewares/replace_path.go b/old/middlewares/replace_path.go deleted file mode 100644 index d5bd03eac..000000000 --- a/old/middlewares/replace_path.go +++ /dev/null @@ -1,28 +0,0 @@ -package middlewares - -import ( - "context" - "net/http" -) - -const ( - // ReplacePathKey is the key within the request context used to - // store the replaced path - ReplacePathKey key = "ReplacePath" - // ReplacedPathHeader is the default header to set the old path to - ReplacedPathHeader = "X-Replaced-Path" -) - -// ReplacePath is a middleware used to replace the path of a URL request -type ReplacePath struct { - Handler http.Handler - Path string -} - -func (s *ReplacePath) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r = r.WithContext(context.WithValue(r.Context(), ReplacePathKey, r.URL.Path)) - r.Header.Add(ReplacedPathHeader, r.URL.Path) - r.URL.Path = s.Path - r.RequestURI = r.URL.RequestURI() - s.Handler.ServeHTTP(w, r) -} diff --git a/old/middlewares/replace_path_regex.go b/old/middlewares/replace_path_regex.go deleted file mode 100644 index ce2e96f93..000000000 --- a/old/middlewares/replace_path_regex.go +++ /dev/null @@ -1,40 +0,0 @@ -package middlewares - -import ( - "context" - "net/http" - "regexp" - "strings" - - "github.com/containous/traefik/old/log" -) - -// ReplacePathRegex is a middleware used to replace the path of a URL request with a regular expression -type ReplacePathRegex struct { - Handler http.Handler - Regexp *regexp.Regexp - Replacement string -} - -// NewReplacePathRegexHandler returns a new ReplacePathRegex -func NewReplacePathRegexHandler(regex string, replacement string, handler http.Handler) http.Handler { - exp, err := regexp.Compile(strings.TrimSpace(regex)) - if err != nil { - log.Errorf("Error compiling regular expression %s: %s", regex, err) - } - return &ReplacePathRegex{ - Regexp: exp, - Replacement: strings.TrimSpace(replacement), - Handler: handler, - } -} - -func (s *ReplacePathRegex) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if s.Regexp != nil && len(s.Replacement) > 0 && s.Regexp.MatchString(r.URL.Path) { - r = r.WithContext(context.WithValue(r.Context(), ReplacePathKey, r.URL.Path)) - r.Header.Add(ReplacedPathHeader, r.URL.Path) - r.URL.Path = s.Regexp.ReplaceAllString(r.URL.Path, s.Replacement) - r.RequestURI = r.URL.RequestURI() - } - s.Handler.ServeHTTP(w, r) -} diff --git a/old/middlewares/replace_path_regex_test.go b/old/middlewares/replace_path_regex_test.go deleted file mode 100644 index 7b7fac778..000000000 --- a/old/middlewares/replace_path_regex_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package middlewares - -import ( - "net/http" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -func TestReplacePathRegex(t *testing.T) { - testCases := []struct { - desc string - path string - 
replacement string - regex string - expectedPath string - expectedHeader string - }{ - { - desc: "simple regex", - path: "/whoami/and/whoami", - replacement: "/who-am-i/$1", - regex: `^/whoami/(.*)`, - expectedPath: "/who-am-i/and/whoami", - expectedHeader: "/whoami/and/whoami", - }, - { - desc: "simple replace (no regex)", - path: "/whoami/and/whoami", - replacement: "/who-am-i", - regex: `/whoami`, - expectedPath: "/who-am-i/and/who-am-i", - expectedHeader: "/whoami/and/whoami", - }, - { - desc: "multiple replacement", - path: "/downloads/src/source.go", - replacement: "/downloads/$1-$2", - regex: `^(?i)/downloads/([^/]+)/([^/]+)$`, - expectedPath: "/downloads/src-source.go", - expectedHeader: "/downloads/src/source.go", - }, - { - desc: "invalid regular expression", - path: "/invalid/regexp/test", - replacement: "/valid/regexp/$1", - regex: `^(?err)/invalid/regexp/([^/]+)$`, - expectedPath: "/invalid/regexp/test", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var actualPath, actualHeader, requestURI string - handler := NewReplacePathRegexHandler( - test.regex, - test.replacement, - http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actualPath = r.URL.Path - actualHeader = r.Header.Get(ReplacedPathHeader) - requestURI = r.RequestURI - }), - ) - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) - - handler.ServeHTTP(nil, req) - - assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") - assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", ReplacedPathHeader) - if test.expectedHeader != "" { - assert.Equal(t, actualPath, requestURI, "Unexpected request URI.") - } - }) - } -} diff --git a/old/middlewares/replace_path_test.go b/old/middlewares/replace_path_test.go deleted file mode 100644 index e7d78f271..000000000 --- a/old/middlewares/replace_path_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package middlewares - -import ( - "net/http" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -func TestReplacePath(t *testing.T) { - const replacementPath = "/replacement-path" - - paths := []string{ - "/example", - "/some/really/long/path", - } - - for _, path := range paths { - t.Run(path, func(t *testing.T) { - - var expectedPath, actualHeader, requestURI string - handler := &ReplacePath{ - Path: replacementPath, - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - expectedPath = r.URL.Path - actualHeader = r.Header.Get(ReplacedPathHeader) - requestURI = r.RequestURI - }), - } - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+path, nil) - - handler.ServeHTTP(nil, req) - - assert.Equal(t, expectedPath, replacementPath, "Unexpected path.") - assert.Equal(t, path, actualHeader, "Unexpected '%s' header.", ReplacedPathHeader) - assert.Equal(t, expectedPath, requestURI, "Unexpected request URI.") - }) - } -} diff --git a/old/middlewares/request_host.go b/old/middlewares/request_host.go deleted file mode 100644 index f92d5636c..000000000 --- a/old/middlewares/request_host.go +++ /dev/null @@ -1,45 +0,0 @@ -package middlewares - -import ( - "context" - "net" - "net/http" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" -) - -var requestHostKey struct{} - -// RequestHost is the struct for the middleware that adds the CanonicalDomain of the request Host into a context for later use. 
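// Illustrative sketch for the path-replacement middlewares removed above
// (assumed paths, not taken from the deleted sources): the original path is
// preserved in the X-Replaced-Path request header before the URL is rewritten.
//
//    handler := middlewares.NewReplacePathRegexHandler(
//        `^/products/(.*)`, "/items/$1",
//        backend, // backend is a hypothetical http.Handler
//    )
//    // GET /products/42 reaches backend as /items/42,
//    // with the request header X-Replaced-Path set to /products/42.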
-type RequestHost struct{} - -func (rh *RequestHost) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if next != nil { - host := types.CanonicalDomain(parseHost(r.Host)) - next.ServeHTTP(rw, r.WithContext(context.WithValue(r.Context(), requestHostKey, host))) - } -} - -func parseHost(addr string) string { - if !strings.Contains(addr, ":") { - return addr - } - - host, _, err := net.SplitHostPort(addr) - if err != nil { - return addr - } - return host -} - -// GetCanonizedHost plucks the canonized host key from the request of a context that was put through the middleware -func GetCanonizedHost(ctx context.Context) string { - if val, ok := ctx.Value(requestHostKey).(string); ok { - return val - } - - log.Warn("RequestHost is missing in the middleware chain") - return "" -} diff --git a/old/middlewares/request_host_test.go b/old/middlewares/request_host_test.go deleted file mode 100644 index eb17a9f72..000000000 --- a/old/middlewares/request_host_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package middlewares - -import ( - "net/http" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -func TestRequestHost(t *testing.T) { - testCases := []struct { - desc string - url string - expected string - }{ - { - desc: "host without :", - url: "http://host", - expected: "host", - }, - { - desc: "host with : and without port", - url: "http://host:", - expected: "host", - }, - { - desc: "IP host with : and with port", - url: "http://127.0.0.1:123", - expected: "127.0.0.1", - }, - { - desc: "IP host with : and without port", - url: "http://127.0.0.1:", - expected: "127.0.0.1", - }, - } - - rh := &RequestHost{} - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - req := testhelpers.MustNewRequest(http.MethodGet, test.url, nil) - - rh.ServeHTTP(nil, req, func(_ http.ResponseWriter, r *http.Request) { - host := GetCanonizedHost(r.Context()) - assert.Equal(t, test.expected, host) - }) - }) - } -} - -func TestRequestHostParseHost(t *testing.T) { - testCases := []struct { - desc string - host string - expected string - }{ - { - desc: "host without :", - host: "host", - expected: "host", - }, - { - desc: "host with : and without port", - host: "host:", - expected: "host", - }, - { - desc: "IP host with : and with port", - host: "127.0.0.1:123", - expected: "127.0.0.1", - }, - { - desc: "IP host with : and without port", - host: "127.0.0.1:", - expected: "127.0.0.1", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := parseHost(test.host) - - assert.Equal(t, test.expected, actual) - }) - } -} diff --git a/old/middlewares/retry.go b/old/middlewares/retry.go deleted file mode 100644 index 062bd2240..000000000 --- a/old/middlewares/retry.go +++ /dev/null @@ -1,185 +0,0 @@ -package middlewares - -import ( - "bufio" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptrace" - - "github.com/containous/traefik/old/log" -) - -// Compile time validation that the response writer implements http interfaces correctly. 
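// Illustrative sketch for the RequestHost middleware removed above (assumed
// wiring, not taken from the deleted sources): the middleware stores the
// canonical host in the request context, and later handlers read it back with
// GetCanonizedHost.
//
//    n := negroni.New(&middlewares.RequestHost{})
//    n.UseHandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
//        fmt.Fprintln(rw, "routing on host:", middlewares.GetCanonizedHost(r.Context()))
//    })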
-var _ Stateful = &retryResponseWriterWithCloseNotify{} - -// Retry is a middleware that retries requests -type Retry struct { - attempts int - next http.Handler - listener RetryListener -} - -// NewRetry returns a new Retry instance -func NewRetry(attempts int, next http.Handler, listener RetryListener) *Retry { - return &Retry{ - attempts: attempts, - next: next, - listener: listener, - } -} - -func (retry *Retry) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - // if we might make multiple attempts, swap the body for an ioutil.NopCloser - // cf https://github.com/containous/traefik/issues/1008 - if retry.attempts > 1 { - body := r.Body - if body == nil { - body = http.NoBody - } - defer body.Close() - r.Body = ioutil.NopCloser(body) - } - - attempts := 1 - for { - shouldRetry := attempts < retry.attempts - retryResponseWriter := newRetryResponseWriter(rw, shouldRetry) - - // Disable retries when the backend already received request data - trace := &httptrace.ClientTrace{ - WroteHeaders: func() { - retryResponseWriter.DisableRetries() - }, - WroteRequest: func(httptrace.WroteRequestInfo) { - retryResponseWriter.DisableRetries() - }, - } - newCtx := httptrace.WithClientTrace(r.Context(), trace) - - retry.next.ServeHTTP(retryResponseWriter, r.WithContext(newCtx)) - if !retryResponseWriter.ShouldRetry() { - break - } - - attempts++ - log.Debugf("New attempt %d for request: %v", attempts, r.URL) - retry.listener.Retried(r, attempts) - } -} - -// RetryListener is used to inform about retry attempts. -type RetryListener interface { - // Retried will be called when a retry happens, with the request attempt passed to it. - // For the first retry this will be attempt 2. - Retried(req *http.Request, attempt int) -} - -// RetryListeners is a convenience type to construct a list of RetryListener and notify -// each of them about a retry attempt. -type RetryListeners []RetryListener - -// Retried exists to implement the RetryListener interface. It calls Retried on each of its slice entries. 
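// Illustrative sketch of a custom RetryListener (assumed type, not taken from
// the deleted sources): any value with a Retried(req, attempt) method can be
// handed to NewRetry, for example to feed retry counts into metrics.
//
//    type countingListener struct{ total int }
//
//    func (c *countingListener) Retried(req *http.Request, attempt int) {
//        c.total++ // attempt is 2 for the first retry, 3 for the second, and so on
//    }
//
//    retry := middlewares.NewRetry(3, backend, &countingListener{}) // backend is a hypothetical http.Handler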
-func (l RetryListeners) Retried(req *http.Request, attempt int) { - for _, retryListener := range l { - retryListener.Retried(req, attempt) - } -} - -type retryResponseWriter interface { - http.ResponseWriter - http.Flusher - ShouldRetry() bool - DisableRetries() -} - -func newRetryResponseWriter(rw http.ResponseWriter, shouldRetry bool) retryResponseWriter { - responseWriter := &retryResponseWriterWithoutCloseNotify{ - responseWriter: rw, - headers: make(http.Header), - shouldRetry: shouldRetry, - } - if _, ok := rw.(http.CloseNotifier); ok { - return &retryResponseWriterWithCloseNotify{responseWriter} - } - return responseWriter -} - -type retryResponseWriterWithoutCloseNotify struct { - responseWriter http.ResponseWriter - headers http.Header - shouldRetry bool - written bool -} - -func (rr *retryResponseWriterWithoutCloseNotify) ShouldRetry() bool { - return rr.shouldRetry -} - -func (rr *retryResponseWriterWithoutCloseNotify) DisableRetries() { - rr.shouldRetry = false -} - -func (rr *retryResponseWriterWithoutCloseNotify) Header() http.Header { - if rr.written { - return rr.responseWriter.Header() - } - return rr.headers -} - -func (rr *retryResponseWriterWithoutCloseNotify) Write(buf []byte) (int, error) { - if rr.ShouldRetry() { - return len(buf), nil - } - return rr.responseWriter.Write(buf) -} - -func (rr *retryResponseWriterWithoutCloseNotify) WriteHeader(code int) { - if rr.ShouldRetry() && code == http.StatusServiceUnavailable { - // We get a 503 HTTP Status Code when there is no backend server in the pool - // to which the request could be sent. Also, note that rr.ShouldRetry() - // will never return true in case there was a connection established to - // the backend server and so we can be sure that the 503 was produced - // inside Traefik already and we don't have to retry in this cases. - rr.DisableRetries() - } - - if rr.ShouldRetry() { - return - } - - // In that case retry case is set to false which means we at least managed - // to write headers to the backend : we are not going to perform any further retry. - // So it is now safe to alter current response headers with headers collected during - // the latest try before writing headers to client. 
- headers := rr.responseWriter.Header() - for header, value := range rr.headers { - headers[header] = value - } - - rr.responseWriter.WriteHeader(code) - rr.written = true -} - -func (rr *retryResponseWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := rr.responseWriter.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("%T is not a http.Hijacker", rr.responseWriter) - } - return hijacker.Hijack() -} - -func (rr *retryResponseWriterWithoutCloseNotify) Flush() { - if flusher, ok := rr.responseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -type retryResponseWriterWithCloseNotify struct { - *retryResponseWriterWithoutCloseNotify -} - -func (rr *retryResponseWriterWithCloseNotify) CloseNotify() <-chan bool { - return rr.responseWriter.(http.CloseNotifier).CloseNotify() -} diff --git a/old/middlewares/retry_test.go b/old/middlewares/retry_test.go deleted file mode 100644 index 71886465d..000000000 --- a/old/middlewares/retry_test.go +++ /dev/null @@ -1,304 +0,0 @@ -package middlewares - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/http/httptrace" - "strings" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/gorilla/websocket" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/vulcand/oxy/forward" - "github.com/vulcand/oxy/roundrobin" -) - -func TestRetry(t *testing.T) { - testCases := []struct { - desc string - maxRequestAttempts int - wantRetryAttempts int - wantResponseStatus int - amountFaultyEndpoints int - }{ - { - desc: "no retry on success", - maxRequestAttempts: 1, - wantRetryAttempts: 0, - wantResponseStatus: http.StatusOK, - amountFaultyEndpoints: 0, - }, - { - desc: "no retry when max request attempts is one", - maxRequestAttempts: 1, - wantRetryAttempts: 0, - wantResponseStatus: http.StatusInternalServerError, - amountFaultyEndpoints: 1, - }, - { - desc: "one retry when one server is faulty", - maxRequestAttempts: 2, - wantRetryAttempts: 1, - wantResponseStatus: http.StatusOK, - amountFaultyEndpoints: 1, - }, - { - desc: "two retries when two servers are faulty", - maxRequestAttempts: 3, - wantRetryAttempts: 2, - wantResponseStatus: http.StatusOK, - amountFaultyEndpoints: 2, - }, - { - desc: "max attempts exhausted delivers the 5xx response", - maxRequestAttempts: 3, - wantRetryAttempts: 2, - wantResponseStatus: http.StatusInternalServerError, - amountFaultyEndpoints: 3, - }, - } - - backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - rw.WriteHeader(http.StatusOK) - rw.Write([]byte("OK")) - })) - - forwarder, err := forward.New() - if err != nil { - t.Fatalf("Error creating forwarder: %s", err) - } - - for _, test := range testCases { - test := test - - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - loadBalancer, err := roundrobin.New(forwarder) - if err != nil { - t.Fatalf("Error creating load balancer: %s", err) - } - - basePort := 33444 - for i := 0; i < test.amountFaultyEndpoints; i++ { - // 192.0.2.0 is a non-routable IP for testing purposes. - // See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928 - // We only use the port specification here because the URL is used as identifier - // in the load balancer and using the exact same URL would not add a new server. 
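// Note on the conversion in the line below: string(basePort+i) turns the
// integer into a single rune rather than into its decimal digits, so the URL
// does not literally contain port 33444+i; strconv.Itoa(basePort+i) would be
// needed to build a numeric port string.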
- err = loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i))) - assert.NoError(t, err) - } - - // add the functioning server to the end of the load balancer list - err = loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL)) - assert.NoError(t, err) - - retryListener := &countingRetryListener{} - retry := NewRetry(test.maxRequestAttempts, loadBalancer, retryListener) - - recorder := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, "http://localhost:3000/ok", nil) - - retry.ServeHTTP(recorder, req) - - assert.Equal(t, test.wantResponseStatus, recorder.Code) - assert.Equal(t, test.wantRetryAttempts, retryListener.timesCalled) - }) - } -} - -func TestRetryWebsocket(t *testing.T) { - testCases := []struct { - desc string - maxRequestAttempts int - expectedRetryAttempts int - expectedResponseStatus int - expectedError bool - amountFaultyEndpoints int - }{ - { - desc: "Switching ok after 2 retries", - maxRequestAttempts: 3, - expectedRetryAttempts: 2, - amountFaultyEndpoints: 2, - expectedResponseStatus: http.StatusSwitchingProtocols, - }, - { - desc: "Switching failed", - maxRequestAttempts: 2, - expectedRetryAttempts: 1, - amountFaultyEndpoints: 2, - expectedResponseStatus: http.StatusBadGateway, - expectedError: true, - }, - } - - forwarder, err := forward.New() - if err != nil { - t.Fatalf("Error creating forwarder: %s", err) - } - - backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - upgrader := websocket.Upgrader{} - upgrader.Upgrade(rw, req, nil) - })) - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - loadBalancer, err := roundrobin.New(forwarder) - if err != nil { - t.Fatalf("Error creating load balancer: %s", err) - } - - basePort := 33444 - for i := 0; i < test.amountFaultyEndpoints; i++ { - // 192.0.2.0 is a non-routable IP for testing purposes. - // See: https://stackoverflow.com/questions/528538/non-routable-ip-address/18436928#18436928 - // We only use the port specification here because the URL is used as identifier - // in the load balancer and using the exact same URL would not add a new server. - loadBalancer.UpsertServer(testhelpers.MustParseURL("http://192.0.2.0:" + string(basePort+i))) - } - - // add the functioning server to the end of the load balancer list - loadBalancer.UpsertServer(testhelpers.MustParseURL(backendServer.URL)) - - retryListener := &countingRetryListener{} - retry := NewRetry(test.maxRequestAttempts, loadBalancer, retryListener) - - retryServer := httptest.NewServer(retry) - - url := strings.Replace(retryServer.URL, "http", "ws", 1) - _, response, err := websocket.DefaultDialer.Dial(url, nil) - - if !test.expectedError { - require.NoError(t, err) - } - - assert.Equal(t, test.expectedResponseStatus, response.StatusCode) - assert.Equal(t, test.expectedRetryAttempts, retryListener.timesCalled) - }) - } -} - -func TestRetryEmptyServerList(t *testing.T) { - forwarder, err := forward.New() - if err != nil { - t.Fatalf("Error creating forwarder: %s", err) - } - - loadBalancer, err := roundrobin.New(forwarder) - if err != nil { - t.Fatalf("Error creating load balancer: %s", err) - } - - // The EmptyBackendHandler middleware ensures that there is a 503 - // response status set when there is no backend server in the pool. 
- next := NewEmptyBackendHandler(loadBalancer) - - retryListener := &countingRetryListener{} - retry := NewRetry(3, next, retryListener) - - recorder := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, "http://localhost:3000/ok", nil) - - retry.ServeHTTP(recorder, req) - - const wantResponseStatus = http.StatusServiceUnavailable - if wantResponseStatus != recorder.Code { - t.Errorf("got status code %d, want %d", recorder.Code, wantResponseStatus) - } - const wantRetryAttempts = 0 - if wantRetryAttempts != retryListener.timesCalled { - t.Errorf("retry listener called %d time(s), want %d time(s)", retryListener.timesCalled, wantRetryAttempts) - } -} - -func TestRetryListeners(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, "/", nil) - retryListeners := RetryListeners{&countingRetryListener{}, &countingRetryListener{}} - - retryListeners.Retried(req, 1) - retryListeners.Retried(req, 1) - - for _, retryListener := range retryListeners { - listener := retryListener.(*countingRetryListener) - if listener.timesCalled != 2 { - t.Errorf("retry listener was called %d time(s), want %d time(s)", listener.timesCalled, 2) - } - } -} - -// countingRetryListener is a RetryListener implementation to count the times the Retried fn is called. -type countingRetryListener struct { - timesCalled int -} - -func (l *countingRetryListener) Retried(req *http.Request, attempt int) { - l.timesCalled++ -} - -func TestRetryWithFlush(t *testing.T) { - next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - rw.WriteHeader(200) - rw.Write([]byte("FULL ")) - rw.(http.Flusher).Flush() - rw.Write([]byte("DATA")) - }) - - retry := NewRetry(1, next, &countingRetryListener{}) - responseRecorder := httptest.NewRecorder() - - retry.ServeHTTP(responseRecorder, &http.Request{}) - - if responseRecorder.Body.String() != "FULL DATA" { - t.Errorf("Wrong body %q want %q", responseRecorder.Body.String(), "FULL DATA") - } -} - -func TestMultipleRetriesShouldNotLooseHeaders(t *testing.T) { - attempt := 0 - expectedHeaderName := "X-Foo-Test-2" - expectedHeaderValue := "bar" - - next := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - headerName := fmt.Sprintf("X-Foo-Test-%d", attempt) - rw.Header().Add(headerName, expectedHeaderValue) - if attempt < 2 { - attempt++ - return - } - - // Request has been successfully written to backend - trace := httptrace.ContextClientTrace(req.Context()) - trace.WroteHeaders() - - // And we decide to answer to client - rw.WriteHeader(http.StatusNoContent) - }) - - retry := NewRetry(3, next, &countingRetryListener{}) - responseRecorder := httptest.NewRecorder() - retry.ServeHTTP(responseRecorder, &http.Request{}) - - headerValue := responseRecorder.Header().Get(expectedHeaderName) - - // Validate if we have the correct header - if headerValue != expectedHeaderValue { - t.Errorf("Expected to have %s for header %s, got %s", expectedHeaderValue, expectedHeaderName, headerValue) - } - - // Validate that we don't have headers from previous attempts - for i := 0; i < attempt; i++ { - headerName := fmt.Sprintf("X-Foo-Test-%d", i) - headerValue = responseRecorder.Header().Get("headerName") - if headerValue != "" { - t.Errorf("Expected no value for header %s, got %s", headerName, headerValue) - } - } -} diff --git a/old/middlewares/routes.go b/old/middlewares/routes.go deleted file mode 100644 index 86be13c0c..000000000 --- a/old/middlewares/routes.go +++ /dev/null @@ -1,28 +0,0 @@ -package middlewares - -import ( - "encoding/json" - "log" - 
"net/http" - - "github.com/containous/mux" -) - -// Routes holds the gorilla mux routes (for the API & co). -type Routes struct { - router *mux.Router -} - -// NewRoutes return a Routes based on the given router. -func NewRoutes(router *mux.Router) *Routes { - return &Routes{router} -} - -func (router *Routes) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - routeMatch := mux.RouteMatch{} - if router.router.Match(r, &routeMatch) { - rt, _ := json.Marshal(routeMatch.Handler) - log.Println("Request match route ", rt) - } - next(rw, r) -} diff --git a/old/middlewares/secure.go b/old/middlewares/secure.go deleted file mode 100644 index 2ee4858d9..000000000 --- a/old/middlewares/secure.go +++ /dev/null @@ -1,36 +0,0 @@ -package middlewares - -import ( - "github.com/containous/traefik/old/types" - "github.com/unrolled/secure" -) - -// NewSecure constructs a new Secure instance with supplied options. -func NewSecure(headers *types.Headers) *secure.Secure { - if headers == nil || !headers.HasSecureHeadersDefined() { - return nil - } - - opt := secure.Options{ - AllowedHosts: headers.AllowedHosts, - HostsProxyHeaders: headers.HostsProxyHeaders, - SSLRedirect: headers.SSLRedirect, - SSLTemporaryRedirect: headers.SSLTemporaryRedirect, - SSLHost: headers.SSLHost, - SSLProxyHeaders: headers.SSLProxyHeaders, - STSSeconds: headers.STSSeconds, - STSIncludeSubdomains: headers.STSIncludeSubdomains, - STSPreload: headers.STSPreload, - ForceSTSHeader: headers.ForceSTSHeader, - FrameDeny: headers.FrameDeny, - CustomFrameOptionsValue: headers.CustomFrameOptionsValue, - ContentTypeNosniff: headers.ContentTypeNosniff, - BrowserXssFilter: headers.BrowserXSSFilter, - CustomBrowserXssValue: headers.CustomBrowserXSSValue, - ContentSecurityPolicy: headers.ContentSecurityPolicy, - PublicKey: headers.PublicKey, - ReferrerPolicy: headers.ReferrerPolicy, - IsDevelopment: headers.IsDevelopment, - } - return secure.New(opt) -} diff --git a/old/middlewares/stateful.go b/old/middlewares/stateful.go deleted file mode 100644 index 4762d97a1..000000000 --- a/old/middlewares/stateful.go +++ /dev/null @@ -1,12 +0,0 @@ -package middlewares - -import "net/http" - -// Stateful interface groups all http interfaces that must be -// implemented by a stateful middleware (ie: recorders) -type Stateful interface { - http.ResponseWriter - http.Hijacker - http.Flusher - http.CloseNotifier -} diff --git a/old/middlewares/stats.go b/old/middlewares/stats.go deleted file mode 100644 index 61cafdf3d..000000000 --- a/old/middlewares/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -package middlewares - -import ( - "bufio" - "net" - "net/http" - "sync" - "time" -) - -var ( - _ Stateful = &responseRecorder{} -) - -// StatsRecorder is an optional middleware that records more details statistics -// about requests and how they are processed. This currently consists of recent -// requests that have caused errors (4xx and 5xx status codes), making it easy -// to pinpoint problems. -type StatsRecorder struct { - mutex sync.RWMutex - numRecentErrors int - recentErrors []*statsError -} - -// NewStatsRecorder returns a new StatsRecorder -func NewStatsRecorder(numRecentErrors int) *StatsRecorder { - return &StatsRecorder{ - numRecentErrors: numRecentErrors, - } -} - -// Stats includes all of the stats gathered by the recorder. -type Stats struct { - RecentErrors []*statsError `json:"recent_errors"` -} - -// statsError represents an error that has occurred during request processing. 
-type statsError struct { - StatusCode int `json:"status_code"` - Status string `json:"status"` - Method string `json:"method"` - Host string `json:"host"` - Path string `json:"path"` - Time time.Time `json:"time"` -} - -// responseRecorder captures information from the response and preserves it for -// later analysis. -type responseRecorder struct { - http.ResponseWriter - statusCode int -} - -// WriteHeader captures the status code for later retrieval. -func (r *responseRecorder) WriteHeader(status int) { - r.ResponseWriter.WriteHeader(status) - r.statusCode = status -} - -// Hijack hijacks the connection -func (r *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return r.ResponseWriter.(http.Hijacker).Hijack() -} - -// CloseNotify returns a channel that receives at most a -// single value (true) when the client connection has gone -// away. -func (r *responseRecorder) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// Flush sends any buffered data to the client. -func (r *responseRecorder) Flush() { - r.ResponseWriter.(http.Flusher).Flush() -} - -// ServeHTTP silently extracts information from the request and response as it -// is processed. If the response is 4xx or 5xx, add it to the list of 10 most -// recent errors. -func (s *StatsRecorder) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - recorder := &responseRecorder{w, http.StatusOK} - next(recorder, r) - if recorder.statusCode >= http.StatusBadRequest { - s.mutex.Lock() - defer s.mutex.Unlock() - s.recentErrors = append([]*statsError{ - { - StatusCode: recorder.statusCode, - Status: http.StatusText(recorder.statusCode), - Method: r.Method, - Host: r.Host, - Path: r.URL.Path, - Time: time.Now(), - }, - }, s.recentErrors...) - // Limit the size of the list to numRecentErrors - if len(s.recentErrors) > s.numRecentErrors { - s.recentErrors = s.recentErrors[:s.numRecentErrors] - } - } -} - -// Data returns a copy of the statistics that have been gathered. 
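// Illustrative sketch for the StatsRecorder removed above (assumed wiring, not
// taken from the deleted sources): the recorder sits in the middleware chain,
// keeps only the most recent 4xx/5xx responses, and exposes a copy of them
// through Data().
//
//    stats := middlewares.NewStatsRecorder(10)
//    n := negroni.New(stats)
//    n.UseHandler(backend) // backend is a hypothetical http.Handler
//
//    snapshot := stats.Data() // safe copy of the recent errors, e.g. for a JSON API response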
-func (s *StatsRecorder) Data() *Stats { - s.mutex.RLock() - defer s.mutex.RUnlock() - - // We can't return the slice directly or a race condition might develop - recentErrors := make([]*statsError, len(s.recentErrors)) - copy(recentErrors, s.recentErrors) - - return &Stats{ - RecentErrors: recentErrors, - } -} diff --git a/old/middlewares/stripPrefix.go b/old/middlewares/stripPrefix.go deleted file mode 100644 index 222eb33cd..000000000 --- a/old/middlewares/stripPrefix.go +++ /dev/null @@ -1,56 +0,0 @@ -package middlewares - -import ( - "context" - "net/http" - "strings" -) - -const ( - // StripPrefixKey is the key within the request context used to - // store the stripped prefix - StripPrefixKey key = "StripPrefix" - // ForwardedPrefixHeader is the default header to set prefix - ForwardedPrefixHeader = "X-Forwarded-Prefix" -) - -// StripPrefix is a middleware used to strip prefix from an URL request -type StripPrefix struct { - Handler http.Handler - Prefixes []string -} - -func (s *StripPrefix) ServeHTTP(w http.ResponseWriter, r *http.Request) { - for _, prefix := range s.Prefixes { - if strings.HasPrefix(r.URL.Path, prefix) { - rawReqPath := r.URL.Path - r.URL.Path = stripPrefix(r.URL.Path, prefix) - if r.URL.RawPath != "" { - r.URL.RawPath = stripPrefix(r.URL.RawPath, prefix) - } - s.serveRequest(w, r, strings.TrimSpace(prefix), rawReqPath) - return - } - } - http.NotFound(w, r) -} - -func (s *StripPrefix) serveRequest(w http.ResponseWriter, r *http.Request, prefix string, rawReqPath string) { - r = r.WithContext(context.WithValue(r.Context(), StripPrefixKey, rawReqPath)) - r.Header.Add(ForwardedPrefixHeader, prefix) - r.RequestURI = r.URL.RequestURI() - s.Handler.ServeHTTP(w, r) -} - -// SetHandler sets handler -func (s *StripPrefix) SetHandler(Handler http.Handler) { - s.Handler = Handler -} - -func stripPrefix(s, prefix string) string { - return ensureLeadingSlash(strings.TrimPrefix(s, prefix)) -} - -func ensureLeadingSlash(str string) string { - return "/" + strings.TrimPrefix(str, "/") -} diff --git a/old/middlewares/stripPrefixRegex.go b/old/middlewares/stripPrefixRegex.go deleted file mode 100644 index 9c3d19ff1..000000000 --- a/old/middlewares/stripPrefixRegex.go +++ /dev/null @@ -1,59 +0,0 @@ -package middlewares - -import ( - "context" - "net/http" - - "github.com/containous/mux" - "github.com/containous/traefik/old/log" -) - -// StripPrefixRegex is a middleware used to strip prefix from an URL request -type StripPrefixRegex struct { - Handler http.Handler - router *mux.Router -} - -// NewStripPrefixRegex builds a new StripPrefixRegex given a handler and prefixes -func NewStripPrefixRegex(handler http.Handler, prefixes []string) *StripPrefixRegex { - stripPrefix := StripPrefixRegex{Handler: handler, router: mux.NewRouter()} - - for _, prefix := range prefixes { - stripPrefix.router.PathPrefix(prefix) - } - - return &stripPrefix -} - -func (s *StripPrefixRegex) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var match mux.RouteMatch - if s.router.Match(r, &match) { - params := make([]string, 0, len(match.Vars)*2) - for key, val := range match.Vars { - params = append(params, key) - params = append(params, val) - } - - prefix, err := match.Route.URL(params...) 
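// match.Route.URL above rebuilds the concrete prefix from the route template
// and the captured variables (for example /c/{category}/{id:[0-9]+}/ together
// with its matches); the request path is then shortened by exactly
// len(prefix.Path) below.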
- if err != nil || len(prefix.Path) > len(r.URL.Path) { - log.Error("Error in stripPrefix middleware", err) - return - } - rawReqPath := r.URL.Path - r.URL.Path = r.URL.Path[len(prefix.Path):] - if r.URL.RawPath != "" { - r.URL.RawPath = r.URL.RawPath[len(prefix.Path):] - } - r = r.WithContext(context.WithValue(r.Context(), StripPrefixKey, rawReqPath)) - r.Header.Add(ForwardedPrefixHeader, prefix.Path) - r.RequestURI = ensureLeadingSlash(r.URL.RequestURI()) - s.Handler.ServeHTTP(w, r) - return - } - http.NotFound(w, r) -} - -// SetHandler sets handler -func (s *StripPrefixRegex) SetHandler(Handler http.Handler) { - s.Handler = Handler -} diff --git a/old/middlewares/stripPrefixRegex_test.go b/old/middlewares/stripPrefixRegex_test.go deleted file mode 100644 index 57c75bb62..000000000 --- a/old/middlewares/stripPrefixRegex_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package middlewares - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -func TestStripPrefixRegex(t *testing.T) { - testPrefixRegex := []string{"/a/api/", "/b/{regex}/", "/c/{category}/{id:[0-9]+}/"} - - tests := []struct { - path string - expectedStatusCode int - expectedPath string - expectedRawPath string - expectedHeader string - }{ - { - path: "/a/test", - expectedStatusCode: http.StatusNotFound, - }, - { - path: "/a/api/test", - expectedStatusCode: http.StatusOK, - expectedPath: "test", - expectedHeader: "/a/api/", - }, - { - path: "/b/api/", - expectedStatusCode: http.StatusOK, - expectedHeader: "/b/api/", - }, - { - path: "/b/api/test1", - expectedStatusCode: http.StatusOK, - expectedPath: "test1", - expectedHeader: "/b/api/", - }, - { - path: "/b/api2/test2", - expectedStatusCode: http.StatusOK, - expectedPath: "test2", - expectedHeader: "/b/api2/", - }, - { - path: "/c/api/123/", - expectedStatusCode: http.StatusOK, - expectedHeader: "/c/api/123/", - }, - { - path: "/c/api/123/test3", - expectedStatusCode: http.StatusOK, - expectedPath: "test3", - expectedHeader: "/c/api/123/", - }, - { - path: "/c/api/abc/test4", - expectedStatusCode: http.StatusNotFound, - }, - { - path: "/a/api/a%2Fb", - expectedStatusCode: http.StatusOK, - expectedPath: "a/b", - expectedRawPath: "a%2Fb", - expectedHeader: "/a/api/", - }, - } - - for _, test := range tests { - test := test - t.Run(test.path, func(t *testing.T) { - t.Parallel() - - var actualPath, actualRawPath, actualHeader string - handlerPath := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actualPath = r.URL.Path - actualRawPath = r.URL.RawPath - actualHeader = r.Header.Get(ForwardedPrefixHeader) - }) - handler := NewStripPrefixRegex(handlerPath, testPrefixRegex) - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) - resp := &httptest.ResponseRecorder{Code: http.StatusOK} - - handler.ServeHTTP(resp, req) - - assert.Equal(t, test.expectedStatusCode, resp.Code, "Unexpected status code.") - assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") - assert.Equal(t, test.expectedRawPath, actualRawPath, "Unexpected raw path.") - assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", ForwardedPrefixHeader) - }) - } -} diff --git a/old/middlewares/stripPrefix_test.go b/old/middlewares/stripPrefix_test.go deleted file mode 100644 index 41708d382..000000000 --- a/old/middlewares/stripPrefix_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package middlewares - -import ( - "net/http" - "net/http/httptest" - 
"testing" - - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" -) - -func TestStripPrefix(t *testing.T) { - tests := []struct { - desc string - prefixes []string - path string - expectedStatusCode int - expectedPath string - expectedRawPath string - expectedHeader string - }{ - { - desc: "no prefixes configured", - prefixes: []string{}, - path: "/noprefixes", - expectedStatusCode: http.StatusNotFound, - }, - { - desc: "wildcard (.*) requests", - prefixes: []string{"/"}, - path: "/", - expectedStatusCode: http.StatusOK, - expectedPath: "/", - expectedHeader: "/", - }, - { - desc: "prefix and path matching", - prefixes: []string{"/stat"}, - path: "/stat", - expectedStatusCode: http.StatusOK, - expectedPath: "/", - expectedHeader: "/stat", - }, - { - desc: "path prefix on exactly matching path", - prefixes: []string{"/stat/"}, - path: "/stat/", - expectedStatusCode: http.StatusOK, - expectedPath: "/", - expectedHeader: "/stat/", - }, - { - desc: "path prefix on matching longer path", - prefixes: []string{"/stat/"}, - path: "/stat/us", - expectedStatusCode: http.StatusOK, - expectedPath: "/us", - expectedHeader: "/stat/", - }, - { - desc: "path prefix on mismatching path", - prefixes: []string{"/stat/"}, - path: "/status", - expectedStatusCode: http.StatusNotFound, - }, - { - desc: "general prefix on matching path", - prefixes: []string{"/stat"}, - path: "/stat/", - expectedStatusCode: http.StatusOK, - expectedPath: "/", - expectedHeader: "/stat", - }, - { - desc: "earlier prefix matching", - prefixes: []string{"/stat", "/stat/us"}, - path: "/stat/us", - expectedStatusCode: http.StatusOK, - expectedPath: "/us", - expectedHeader: "/stat", - }, - { - desc: "later prefix matching", - prefixes: []string{"/mismatch", "/stat"}, - path: "/stat", - expectedStatusCode: http.StatusOK, - expectedPath: "/", - expectedHeader: "/stat", - }, - { - desc: "prefix matching within slash boundaries", - prefixes: []string{"/stat"}, - path: "/status", - expectedStatusCode: http.StatusOK, - expectedPath: "/us", - expectedHeader: "/stat", - }, - { - desc: "raw path is also stripped", - prefixes: []string{"/stat"}, - path: "/stat/a%2Fb", - expectedStatusCode: http.StatusOK, - expectedPath: "/a/b", - expectedRawPath: "/a%2Fb", - expectedHeader: "/stat", - }, - } - - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var actualPath, actualRawPath, actualHeader, requestURI string - handler := &StripPrefix{ - Prefixes: test.prefixes, - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - actualPath = r.URL.Path - actualRawPath = r.URL.RawPath - actualHeader = r.Header.Get(ForwardedPrefixHeader) - requestURI = r.RequestURI - }), - } - - req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost"+test.path, nil) - resp := &httptest.ResponseRecorder{Code: http.StatusOK} - - handler.ServeHTTP(resp, req) - - assert.Equal(t, test.expectedStatusCode, resp.Code, "Unexpected status code.") - assert.Equal(t, test.expectedPath, actualPath, "Unexpected path.") - assert.Equal(t, test.expectedRawPath, actualRawPath, "Unexpected raw path.") - assert.Equal(t, test.expectedHeader, actualHeader, "Unexpected '%s' header.", ForwardedPrefixHeader) - - expectedURI := test.expectedPath - if test.expectedRawPath != "" { - // go HTTP uses the raw path when existent in the RequestURI - expectedURI = test.expectedRawPath - } - assert.Equal(t, expectedURI, requestURI, "Unexpected request URI.") - }) - } -} diff --git 
a/old/middlewares/tlsClientHeaders.go b/old/middlewares/tlsClientHeaders.go deleted file mode 100644 index 8004d9db8..000000000 --- a/old/middlewares/tlsClientHeaders.go +++ /dev/null @@ -1,289 +0,0 @@ -package middlewares - -import ( - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" -) - -const ( - xForwardedTLSClientCert = "X-Forwarded-Tls-Client-Cert" - xForwardedTLSClientCertInfos = "X-Forwarded-Tls-Client-Cert-Infos" -) - -var attributeTypeNames = map[string]string{ - "0.9.2342.19200300.100.1.25": "DC", // Domain component OID - RFC 2247 -} - -// TLSClientCertificateInfos is a struct for specifying the configuration for the tlsClientHeaders middleware. -type TLSClientCertificateInfos struct { - Issuer *DistinguishedNameOptions - NotAfter bool - NotBefore bool - Sans bool - Subject *DistinguishedNameOptions -} - -// DistinguishedNameOptions is a struct for specifying the configuration for the distinguished name info. -type DistinguishedNameOptions struct { - CommonName bool - CountryName bool - DomainComponent bool - LocalityName bool - OrganizationName bool - SerialNumber bool - StateOrProvinceName bool -} - -// TLSClientHeaders is a middleware that helps setup a few tls info features. -type TLSClientHeaders struct { - Infos *TLSClientCertificateInfos // pass selected informations from the client certificate - PEM bool // pass the sanitized pem to the backend in a specific header -} - -func newDistinguishedNameOptions(infos *types.TLSCLientCertificateDNInfos) *DistinguishedNameOptions { - if infos == nil { - return nil - } - - return &DistinguishedNameOptions{ - CommonName: infos.CommonName, - CountryName: infos.Country, - DomainComponent: infos.DomainComponent, - LocalityName: infos.Locality, - OrganizationName: infos.Organization, - SerialNumber: infos.SerialNumber, - StateOrProvinceName: infos.Province, - } -} - -func newTLSClientInfos(infos *types.TLSClientCertificateInfos) *TLSClientCertificateInfos { - if infos == nil { - return nil - } - - return &TLSClientCertificateInfos{ - Issuer: newDistinguishedNameOptions(infos.Issuer), - NotAfter: infos.NotAfter, - NotBefore: infos.NotBefore, - Sans: infos.Sans, - Subject: newDistinguishedNameOptions(infos.Subject), - } -} - -// NewTLSClientHeaders constructs a new TLSClientHeaders instance from supplied frontend header struct. -func NewTLSClientHeaders(frontend *types.Frontend) *TLSClientHeaders { - if frontend == nil { - return nil - } - - var addPEM bool - var infos *TLSClientCertificateInfos - - if frontend.PassTLSClientCert != nil { - conf := frontend.PassTLSClientCert - addPEM = conf.PEM - infos = newTLSClientInfos(conf.Infos) - } - - return &TLSClientHeaders{ - Infos: infos, - PEM: addPEM, - } -} - -func (s *TLSClientHeaders) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - s.ModifyRequestHeaders(r) - // If there is a next, call it. 
- if next != nil { - next(w, r) - } -} - -// sanitize As we pass the raw certificates, remove the useless data and make it http request compliant -func sanitize(cert []byte) string { - s := string(cert) - r := strings.NewReplacer("-----BEGIN CERTIFICATE-----", "", - "-----END CERTIFICATE-----", "", - "\n", "") - cleaned := r.Replace(s) - - return url.QueryEscape(cleaned) -} - -// extractCertificate extract the certificate from the request -func extractCertificate(cert *x509.Certificate) string { - b := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw} - certPEM := pem.EncodeToMemory(&b) - if certPEM == nil { - log.Error("Cannot extract the certificate content") - return "" - } - return sanitize(certPEM) -} - -// getXForwardedTLSClientCert Build a string with the client certificates -func getXForwardedTLSClientCert(certs []*x509.Certificate) string { - var headerValues []string - - for _, peerCert := range certs { - headerValues = append(headerValues, extractCertificate(peerCert)) - } - - return strings.Join(headerValues, ",") -} - -// getSANs get the Subject Alternate Name values -func getSANs(cert *x509.Certificate) []string { - var sans []string - if cert == nil { - return sans - } - - sans = append(cert.DNSNames, cert.EmailAddresses...) - - var ips []string - for _, ip := range cert.IPAddresses { - ips = append(ips, ip.String()) - } - sans = append(sans, ips...) - - var uris []string - for _, uri := range cert.URIs { - uris = append(uris, uri.String()) - } - - return append(sans, uris...) -} - -func getDNInfos(prefix string, options *DistinguishedNameOptions, cs *pkix.Name) string { - if options == nil { - return "" - } - - content := &strings.Builder{} - - // Manage non standard attributes - for _, name := range cs.Names { - // Domain Component - RFC 2247 - if options.DomainComponent && attributeTypeNames[name.Type.String()] == "DC" { - content.WriteString(fmt.Sprintf("DC=%s,", name.Value)) - } - } - - if options.CountryName { - writeParts(content, cs.Country, "C") - } - - if options.StateOrProvinceName { - writeParts(content, cs.Province, "ST") - } - - if options.LocalityName { - writeParts(content, cs.Locality, "L") - } - - if options.OrganizationName { - writeParts(content, cs.Organization, "O") - } - - if options.SerialNumber { - writePart(content, cs.SerialNumber, "SN") - } - - if options.CommonName { - writePart(content, cs.CommonName, "CN") - } - - if content.Len() > 0 { - return prefix + `="` + strings.TrimSuffix(content.String(), ",") + `"` - } - - return "" -} - -func writeParts(content *strings.Builder, entries []string, prefix string) { - for _, entry := range entries { - writePart(content, entry, prefix) - } -} - -func writePart(content *strings.Builder, entry string, prefix string) { - if len(entry) > 0 { - content.WriteString(fmt.Sprintf("%s=%s,", prefix, entry)) - } -} - -// getXForwardedTLSClientCertInfo Build a string with the wanted client certificates informations -// like Subject="DC=%s,C=%s,ST=%s,L=%s,O=%s,CN=%s",NB=%d,NA=%d,SAN=%s; -func (s *TLSClientHeaders) getXForwardedTLSClientCertInfo(certs []*x509.Certificate) string { - var headerValues []string - - for _, peerCert := range certs { - var values []string - var sans string - var nb string - var na string - - if s.Infos != nil { - subject := getDNInfos("Subject", s.Infos.Subject, &peerCert.Subject) - if len(subject) > 0 { - values = append(values, subject) - } - - issuer := getDNInfos("Issuer", s.Infos.Issuer, &peerCert.Issuer) - if len(issuer) > 0 { - values = append(values, issuer) - } - } - - ci := s.Infos - 
if ci != nil { - if ci.NotBefore { - nb = fmt.Sprintf("NB=%d", uint64(peerCert.NotBefore.Unix())) - values = append(values, nb) - } - if ci.NotAfter { - na = fmt.Sprintf("NA=%d", uint64(peerCert.NotAfter.Unix())) - values = append(values, na) - } - - if ci.Sans { - sans = fmt.Sprintf("SAN=%s", strings.Join(getSANs(peerCert), ",")) - values = append(values, sans) - } - } - - value := strings.Join(values, ",") - headerValues = append(headerValues, value) - } - - return strings.Join(headerValues, ";") -} - -// ModifyRequestHeaders set the wanted headers with the certificates informations -func (s *TLSClientHeaders) ModifyRequestHeaders(r *http.Request) { - if s.PEM { - if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { - r.Header.Set(xForwardedTLSClientCert, getXForwardedTLSClientCert(r.TLS.PeerCertificates)) - } else { - log.Warn("Try to extract certificate on a request without TLS") - } - } - - if s.Infos != nil { - if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { - headerContent := s.getXForwardedTLSClientCertInfo(r.TLS.PeerCertificates) - r.Header.Set(xForwardedTLSClientCertInfos, url.QueryEscape(headerContent)) - } else { - log.Warn("Try to extract certificate on a request without TLS") - } - } -} diff --git a/old/middlewares/tlsClientHeaders_test.go b/old/middlewares/tlsClientHeaders_test.go deleted file mode 100644 index 441ce60bc..000000000 --- a/old/middlewares/tlsClientHeaders_test.go +++ /dev/null @@ -1,1011 +0,0 @@ -package middlewares - -import ( - "crypto/tls" - "crypto/x509" - "encoding/pem" - "net" - "net/http" - "net/http/httptest" - "net/url" - "regexp" - "strings" - "testing" - - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/testhelpers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const ( - signingCA = `Certificate: - Data: - Version: 3 (0x2) - Serial Number: 2 (0x2) - Signature Algorithm: sha1WithRSAEncryption - Issuer: DC=org, DC=cheese, O=Cheese, O=Cheese 2, OU=Cheese Section, OU=Cheese Section 2, CN=Simple Root CA, CN=Simple Root CA 2, C=FR, C=US, L=TOULOUSE, L=LYON, ST=Root State, ST=Root State 2/emailAddress=root@signing.com/emailAddress=root2@signing.com - Validity - Not Before: Dec 6 11:10:09 2018 GMT - Not After : Dec 5 11:10:09 2028 GMT - Subject: DC=org, DC=cheese, O=Cheese, O=Cheese 2, OU=Simple Signing Section, OU=Simple Signing Section 2, CN=Simple Signing CA, CN=Simple Signing CA 2, C=FR, C=US, L=TOULOUSE, L=LYON, ST=Signing State, ST=Signing State 2/emailAddress=simple@signing.com/emailAddress=simple2@signing.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:c3:9d:9f:61:15:57:3f:78:cc:e7:5d:20:e2:3e: - 2e:79:4a:c3:3a:0c:26:40:18:db:87:08:85:c2:f7: - af:87:13:1a:ff:67:8a:b7:2b:58:a7:cc:89:dd:77: - ff:5e:27:65:11:80:82:8f:af:a0:af:25:86:ec:a2: - 4f:20:0e:14:15:16:12:d7:74:5a:c3:99:bd:3b:81: - c8:63:6f:fc:90:14:86:d2:39:ee:87:b2:ff:6d:a5: - 69:da:ab:5a:3a:97:cd:23:37:6a:4b:ba:63:cd:a1: - a9:e6:79:aa:37:b8:d1:90:c9:24:b5:e8:70:fc:15: - ad:39:97:28:73:47:66:f6:22:79:5a:b0:03:83:8a: - f1:ca:ae:8b:50:1e:c8:fa:0d:9f:76:2e:00:c2:0e: - 75:bc:47:5a:b6:d8:05:ed:5a:bc:6d:50:50:36:6b: - ab:ab:69:f6:9b:1b:6c:7e:a8:9f:b2:33:3a:3c:8c: - 6d:5e:83:ce:17:82:9e:10:51:a6:39:ec:98:4e:50: - b7:b1:aa:8b:ac:bb:a1:60:1b:ea:31:3b:b8:0a:ea: - 63:41:79:b5:ec:ee:19:e9:85:8e:f3:6d:93:80:da: - 98:58:a2:40:93:a5:53:eb:1d:24:b6:66:07:ec:58: - 10:63:e7:fa:6e:18:60:74:76:15:39:3c:f4:95:95: - 7e:df - Exponent: 65537 (0x10001) - X509v3 extensions: - 
X509v3 Key Usage: critical - Certificate Sign, CRL Sign - X509v3 Basic Constraints: critical - CA:TRUE, pathlen:0 - X509v3 Subject Key Identifier: - 1E:52:A2:E8:54:D5:37:EB:D5:A8:1D:E4:C2:04:1D:37:E2:F7:70:03 - X509v3 Authority Key Identifier: - keyid:36:70:35:AA:F0:F6:93:B2:86:5D:32:73:F9:41:5A:3F:3B:C8:BC:8B - - Signature Algorithm: sha1WithRSAEncryption - 76:f3:16:21:27:6d:a2:2e:e8:18:49:aa:54:1e:f8:3b:07:fa: - 65:50:d8:1f:a2:cf:64:6c:15:e0:0f:c8:46:b2:d7:b8:0e:cd: - 05:3b:06:fb:dd:c6:2f:01:ae:bd:69:d3:bb:55:47:a9:f6:e5: - ba:be:4b:45:fb:2e:3c:33:e0:57:d4:3e:8e:3e:11:f2:0a:f1: - 7d:06:ab:04:2e:a5:76:20:c2:db:a4:68:5a:39:00:62:2a:1d: - c2:12:b1:90:66:8c:36:a8:fd:83:d1:1b:da:23:a7:1d:5b:e6: - 9b:40:c4:78:25:c7:b7:6b:75:35:cf:bb:37:4a:4f:fc:7e:32: - 1f:8c:cf:12:d2:c9:c8:99:d9:4a:55:0a:1e:ac:de:b4:cb:7c: - bf:c4:fb:60:2c:a8:f7:e7:63:5c:b0:1c:62:af:01:3c:fe:4d: - 3c:0b:18:37:4c:25:fc:d0:b2:f6:b2:f1:c3:f4:0f:53:d6:1e: - b5:fa:bc:d8:ad:dd:1c:f5:45:9f:af:fe:0a:01:79:92:9a:d8: - 71:db:37:f3:1e:bd:fb:c7:1e:0a:0f:97:2a:61:f3:7b:19:93: - 9c:a6:8a:69:cd:b0:f5:91:02:a5:1b:10:f4:80:5d:42:af:4e: - 82:12:30:3e:d3:a7:11:14:ce:50:91:04:80:d7:2a:03:ef:71: - 10:b8:db:a5 ------BEGIN CERTIFICATE----- -MIIFzTCCBLWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCCAWQxEzARBgoJkiaJk/Is -ZAEZFgNvcmcxFjAUBgoJkiaJk/IsZAEZFgZjaGVlc2UxDzANBgNVBAoMBkNoZWVz -ZTERMA8GA1UECgwIQ2hlZXNlIDIxFzAVBgNVBAsMDkNoZWVzZSBTZWN0aW9uMRkw -FwYDVQQLDBBDaGVlc2UgU2VjdGlvbiAyMRcwFQYDVQQDDA5TaW1wbGUgUm9vdCBD -QTEZMBcGA1UEAwwQU2ltcGxlIFJvb3QgQ0EgMjELMAkGA1UEBhMCRlIxCzAJBgNV -BAYTAlVTMREwDwYDVQQHDAhUT1VMT1VTRTENMAsGA1UEBwwETFlPTjETMBEGA1UE -CAwKUm9vdCBTdGF0ZTEVMBMGA1UECAwMUm9vdCBTdGF0ZSAyMR8wHQYJKoZIhvcN -AQkBFhByb290QHNpZ25pbmcuY29tMSAwHgYJKoZIhvcNAQkBFhFyb290MkBzaWdu -aW5nLmNvbTAeFw0xODEyMDYxMTEwMDlaFw0yODEyMDUxMTEwMDlaMIIBhDETMBEG -CgmSJomT8ixkARkWA29yZzEWMBQGCgmSJomT8ixkARkWBmNoZWVzZTEPMA0GA1UE -CgwGQ2hlZXNlMREwDwYDVQQKDAhDaGVlc2UgMjEfMB0GA1UECwwWU2ltcGxlIFNp -Z25pbmcgU2VjdGlvbjEhMB8GA1UECwwYU2ltcGxlIFNpZ25pbmcgU2VjdGlvbiAy -MRowGAYDVQQDDBFTaW1wbGUgU2lnbmluZyBDQTEcMBoGA1UEAwwTU2ltcGxlIFNp -Z25pbmcgQ0EgMjELMAkGA1UEBhMCRlIxCzAJBgNVBAYTAlVTMREwDwYDVQQHDAhU -T1VMT1VTRTENMAsGA1UEBwwETFlPTjEWMBQGA1UECAwNU2lnbmluZyBTdGF0ZTEY -MBYGA1UECAwPU2lnbmluZyBTdGF0ZSAyMSEwHwYJKoZIhvcNAQkBFhJzaW1wbGVA -c2lnbmluZy5jb20xIjAgBgkqhkiG9w0BCQEWE3NpbXBsZTJAc2lnbmluZy5jb20w -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDnZ9hFVc/eMznXSDiPi55 -SsM6DCZAGNuHCIXC96+HExr/Z4q3K1inzIndd/9eJ2URgIKPr6CvJYbsok8gDhQV -FhLXdFrDmb07gchjb/yQFIbSOe6Hsv9tpWnaq1o6l80jN2pLumPNoanmeao3uNGQ -ySS16HD8Fa05lyhzR2b2InlasAODivHKrotQHsj6DZ92LgDCDnW8R1q22AXtWrxt -UFA2a6urafabG2x+qJ+yMzo8jG1eg84Xgp4QUaY57JhOULexqousu6FgG+oxO7gK -6mNBebXs7hnphY7zbZOA2phYokCTpVPrHSS2ZgfsWBBj5/puGGB0dhU5PPSVlX7f -AgMBAAGjZjBkMA4GA1UdDwEB/wQEAwIBBjASBgNVHRMBAf8ECDAGAQH/AgEAMB0G -A1UdDgQWBBQeUqLoVNU369WoHeTCBB034vdwAzAfBgNVHSMEGDAWgBQ2cDWq8PaT -soZdMnP5QVo/O8i8izANBgkqhkiG9w0BAQUFAAOCAQEAdvMWISdtoi7oGEmqVB74 -Owf6ZVDYH6LPZGwV4A/IRrLXuA7NBTsG+93GLwGuvWnTu1VHqfblur5LRfsuPDPg -V9Q+jj4R8grxfQarBC6ldiDC26RoWjkAYiodwhKxkGaMNqj9g9Eb2iOnHVvmm0DE -eCXHt2t1Nc+7N0pP/H4yH4zPEtLJyJnZSlUKHqzetMt8v8T7YCyo9+djXLAcYq8B -PP5NPAsYN0wl/NCy9rLxw/QPU9Yetfq82K3dHPVFn6/+CgF5kprYcds38x69+8ce -Cg+XKmHzexmTnKaKac2w9ZECpRsQ9IBdQq9OghIwPtOnERTOUJEEgNcqA+9xELjb -pQ== ------END CERTIFICATE----- -` - minimalCheeseCrt = `-----BEGIN CERTIFICATE----- -MIIEQDCCAygCFFRY0OBk/L5Se0IZRj3CMljawL2UMA0GCSqGSIb3DQEBCwUAMIIB -hDETMBEGCgmSJomT8ixkARkWA29yZzEWMBQGCgmSJomT8ixkARkWBmNoZWVzZTEP -MA0GA1UECgwGQ2hlZXNlMREwDwYDVQQKDAhDaGVlc2UgMjEfMB0GA1UECwwWU2lt 
-cGxlIFNpZ25pbmcgU2VjdGlvbjEhMB8GA1UECwwYU2ltcGxlIFNpZ25pbmcgU2Vj -dGlvbiAyMRowGAYDVQQDDBFTaW1wbGUgU2lnbmluZyBDQTEcMBoGA1UEAwwTU2lt -cGxlIFNpZ25pbmcgQ0EgMjELMAkGA1UEBhMCRlIxCzAJBgNVBAYTAlVTMREwDwYD -VQQHDAhUT1VMT1VTRTENMAsGA1UEBwwETFlPTjEWMBQGA1UECAwNU2lnbmluZyBT -dGF0ZTEYMBYGA1UECAwPU2lnbmluZyBTdGF0ZSAyMSEwHwYJKoZIhvcNAQkBFhJz -aW1wbGVAc2lnbmluZy5jb20xIjAgBgkqhkiG9w0BCQEWE3NpbXBsZTJAc2lnbmlu -Zy5jb20wHhcNMTgxMjA2MTExMDM2WhcNMjEwOTI1MTExMDM2WjAzMQswCQYDVQQG -EwJGUjETMBEGA1UECAwKU29tZS1TdGF0ZTEPMA0GA1UECgwGQ2hlZXNlMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAskX/bUtwFo1gF2BTPNaNcTUMaRFu -FMZozK8IgLjccZ4kZ0R9oFO6Yp8Zl/IvPaf7tE26PI7XP7eHriUdhnQzX7iioDd0 -RZa68waIhAGc+xPzRFrP3b3yj3S2a9Rve3c0K+SCV+EtKAwsxMqQDhoo9PcBfo5B -RHfht07uD5MncUcGirwN+/pxHV5xzAGPcc7On0/5L7bq/G+63nhu78zw9XyuLaHC -PM5VbOUvpyIESJHbMMzTdFGL8ob9VKO+Kr1kVGdEA9i8FLGl3xz/GBKuW/JD0xyW -DrU29mri5vYWHmkuv7ZWHGXnpXjTtPHwveE9/0/ArnmpMyR9JtqFr1oEvQIDAQAB -MA0GCSqGSIb3DQEBCwUAA4IBAQBHta+NWXI08UHeOkGzOTGRiWXsOH2dqdX6gTe9 -xF1AIjyoQ0gvpoGVvlnChSzmlUj+vnx/nOYGIt1poE3hZA3ZHZD/awsvGyp3GwWD -IfXrEViSCIyF+8tNNKYyUcEO3xdAsAUGgfUwwF/mZ6MBV5+A/ZEEILlTq8zFt9dV -vdKzIt7fZYxYBBHFSarl1x8pDgWXlf3hAufevGJXip9xGYmznF0T5cq1RbWJ4be3 -/9K7yuWhuBYC3sbTbCneHBa91M82za+PIISc1ygCYtWSBoZKSAqLk0rkZpHaekDP -WqeUSNGYV//RunTeuRDAf5OxehERb1srzBXhRZ3cZdzXbgR/ ------END CERTIFICATE----- -` - - completeCheeseCrt = `Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: DC=org, DC=cheese, O=Cheese, O=Cheese 2, OU=Simple Signing Section, OU=Simple Signing Section 2, CN=Simple Signing CA, CN=Simple Signing CA 2, C=FR, C=US, L=TOULOUSE, L=LYON, ST=Signing State, ST=Signing State 2/emailAddress=simple@signing.com/emailAddress=simple2@signing.com - Validity - Not Before: Dec 6 11:10:16 2018 GMT - Not After : Dec 5 11:10:16 2020 GMT - Subject: DC=org, DC=cheese, O=Cheese, O=Cheese 2, OU=Simple Signing Section, OU=Simple Signing Section 2, CN=*.cheese.org, CN=*.cheese.com, C=FR, C=US, L=TOULOUSE, L=LYON, ST=Cheese org state, ST=Cheese com state/emailAddress=cert@cheese.org/emailAddress=cert@scheese.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:de:77:fa:8d:03:70:30:39:dd:51:1b:cc:60:db: - a9:5a:13:b1:af:fe:2c:c6:38:9b:88:0a:0f:8e:d9: - 1b:a1:1d:af:0d:66:e4:13:5b:bc:5d:36:92:d7:5e: - d0:fa:88:29:d3:78:e1:81:de:98:b2:a9:22:3f:bf: - 8a:af:12:92:63:d4:a9:c3:f2:e4:7e:d2:dc:a2:c5: - 39:1c:7a:eb:d7:12:70:63:2e:41:47:e0:f0:08:e8: - dc:be:09:01:ec:28:09:af:35:d7:79:9c:50:35:d1: - 6b:e5:87:7b:34:f6:d2:31:65:1d:18:42:69:6c:04: - 11:83:fe:44:ae:90:92:2d:0b:75:39:57:62:e6:17: - 2f:47:2b:c7:53:dd:10:2d:c9:e3:06:13:d2:b9:ba: - 63:2e:3c:7d:83:6b:d6:89:c9:cc:9d:4d:bf:9f:e8: - a3:7b:da:c8:99:2b:ba:66:d6:8e:f8:41:41:a0:c9: - d0:5e:c8:11:a4:55:4a:93:83:87:63:04:63:41:9c: - fb:68:04:67:c2:71:2f:f2:65:1d:02:5d:15:db:2c: - d9:04:69:85:c2:7d:0d:ea:3b:ac:85:f8:d4:8f:0f: - c5:70:b2:45:e1:ec:b2:54:0b:e9:f7:82:b4:9b:1b: - 2d:b9:25:d4:ab:ca:8f:5b:44:3e:15:dd:b8:7f:b7: - ee:f9 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Key Usage: critical - Digital Signature, Key Encipherment - X509v3 Basic Constraints: - CA:FALSE - X509v3 Extended Key Usage: - TLS Web Server Authentication, TLS Web Client Authentication - X509v3 Subject Key Identifier: - 94:BA:73:78:A2:87:FB:58:28:28:CF:98:3B:C2:45:70:16:6E:29:2F - X509v3 Authority Key Identifier: - keyid:1E:52:A2:E8:54:D5:37:EB:D5:A8:1D:E4:C2:04:1D:37:E2:F7:70:03 - - X509v3 Subject Alternative Name: - DNS:*.cheese.org, 
DNS:*.cheese.net, DNS:*.cheese.com, IP Address:10.0.1.0, IP Address:10.0.1.2, email:test@cheese.org, email:test@cheese.net - Signature Algorithm: sha1WithRSAEncryption - 76:6b:05:b0:0e:34:11:b1:83:99:91:dc:ae:1b:e2:08:15:8b: - 16:b2:9b:27:1c:02:ac:b5:df:1b:d0:d0:75:a4:2b:2c:5c:65: - ed:99:ab:f7:cd:fe:38:3f:c3:9a:22:31:1b:ac:8c:1c:c2:f9: - 5d:d4:75:7a:2e:72:c7:85:a9:04:af:9f:2a:cc:d3:96:75:f0: - 8e:c7:c6:76:48:ac:45:a4:b9:02:1e:2f:c0:15:c4:07:08:92: - cb:27:50:67:a1:c8:05:c5:3a:b3:a6:48:be:eb:d5:59:ab:a2: - 1b:95:30:71:13:5b:0a:9a:73:3b:60:cc:10:d0:6a:c7:e5:d7: - 8b:2f:f9:2e:98:f2:ff:81:14:24:09:e3:4b:55:57:09:1a:22: - 74:f1:f6:40:13:31:43:89:71:0a:96:1a:05:82:1f:83:3a:87: - 9b:17:25:ef:5a:55:f2:2d:cd:0d:4d:e4:81:58:b6:e3:8d:09: - 62:9a:0c:bd:e4:e5:5c:f0:95:da:cb:c7:34:2c:34:5f:6d:fc: - 60:7b:12:5b:86:fd:df:21:89:3b:48:08:30:bf:67:ff:8c:e6: - 9b:53:cc:87:36:47:70:40:3b:d9:90:2a:d2:d2:82:c6:9c:f5: - d1:d8:e0:e6:fd:aa:2f:95:7e:39:ac:fc:4e:d4:ce:65:b3:ec: - c6:98:8a:31 ------BEGIN CERTIFICATE----- -MIIGWjCCBUKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCCAYQxEzARBgoJkiaJk/Is -ZAEZFgNvcmcxFjAUBgoJkiaJk/IsZAEZFgZjaGVlc2UxDzANBgNVBAoMBkNoZWVz -ZTERMA8GA1UECgwIQ2hlZXNlIDIxHzAdBgNVBAsMFlNpbXBsZSBTaWduaW5nIFNl -Y3Rpb24xITAfBgNVBAsMGFNpbXBsZSBTaWduaW5nIFNlY3Rpb24gMjEaMBgGA1UE -AwwRU2ltcGxlIFNpZ25pbmcgQ0ExHDAaBgNVBAMME1NpbXBsZSBTaWduaW5nIENB -IDIxCzAJBgNVBAYTAkZSMQswCQYDVQQGEwJVUzERMA8GA1UEBwwIVE9VTE9VU0Ux -DTALBgNVBAcMBExZT04xFjAUBgNVBAgMDVNpZ25pbmcgU3RhdGUxGDAWBgNVBAgM -D1NpZ25pbmcgU3RhdGUgMjEhMB8GCSqGSIb3DQEJARYSc2ltcGxlQHNpZ25pbmcu -Y29tMSIwIAYJKoZIhvcNAQkBFhNzaW1wbGUyQHNpZ25pbmcuY29tMB4XDTE4MTIw -NjExMTAxNloXDTIwMTIwNTExMTAxNlowggF2MRMwEQYKCZImiZPyLGQBGRYDb3Jn -MRYwFAYKCZImiZPyLGQBGRYGY2hlZXNlMQ8wDQYDVQQKDAZDaGVlc2UxETAPBgNV -BAoMCENoZWVzZSAyMR8wHQYDVQQLDBZTaW1wbGUgU2lnbmluZyBTZWN0aW9uMSEw -HwYDVQQLDBhTaW1wbGUgU2lnbmluZyBTZWN0aW9uIDIxFTATBgNVBAMMDCouY2hl -ZXNlLm9yZzEVMBMGA1UEAwwMKi5jaGVlc2UuY29tMQswCQYDVQQGEwJGUjELMAkG -A1UEBhMCVVMxETAPBgNVBAcMCFRPVUxPVVNFMQ0wCwYDVQQHDARMWU9OMRkwFwYD -VQQIDBBDaGVlc2Ugb3JnIHN0YXRlMRkwFwYDVQQIDBBDaGVlc2UgY29tIHN0YXRl -MR4wHAYJKoZIhvcNAQkBFg9jZXJ0QGNoZWVzZS5vcmcxHzAdBgkqhkiG9w0BCQEW -EGNlcnRAc2NoZWVzZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQDed/qNA3AwOd1RG8xg26laE7Gv/izGOJuICg+O2RuhHa8NZuQTW7xdNpLXXtD6 -iCnTeOGB3piyqSI/v4qvEpJj1KnD8uR+0tyixTkceuvXEnBjLkFH4PAI6Ny+CQHs -KAmvNdd5nFA10Wvlh3s09tIxZR0YQmlsBBGD/kSukJItC3U5V2LmFy9HK8dT3RAt -yeMGE9K5umMuPH2Da9aJycydTb+f6KN72siZK7pm1o74QUGgydBeyBGkVUqTg4dj -BGNBnPtoBGfCcS/yZR0CXRXbLNkEaYXCfQ3qO6yF+NSPD8VwskXh7LJUC+n3grSb -Gy25JdSryo9bRD4V3bh/t+75AgMBAAGjgeAwgd0wDgYDVR0PAQH/BAQDAgWgMAkG -A1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQW -BBSUunN4oof7WCgoz5g7wkVwFm4pLzAfBgNVHSMEGDAWgBQeUqLoVNU369WoHeTC -BB034vdwAzBhBgNVHREEWjBYggwqLmNoZWVzZS5vcmeCDCouY2hlZXNlLm5ldIIM -Ki5jaGVlc2UuY29thwQKAAEAhwQKAAECgQ90ZXN0QGNoZWVzZS5vcmeBD3Rlc3RA -Y2hlZXNlLm5ldDANBgkqhkiG9w0BAQUFAAOCAQEAdmsFsA40EbGDmZHcrhviCBWL -FrKbJxwCrLXfG9DQdaQrLFxl7Zmr983+OD/DmiIxG6yMHML5XdR1ei5yx4WpBK+f -KszTlnXwjsfGdkisRaS5Ah4vwBXEBwiSyydQZ6HIBcU6s6ZIvuvVWauiG5UwcRNb -CppzO2DMENBqx+XXiy/5Lpjy/4EUJAnjS1VXCRoidPH2QBMxQ4lxCpYaBYIfgzqH -mxcl71pV8i3NDU3kgVi2440JYpoMveTlXPCV2svHNCw0X238YHsSW4b93yGJO0gI -ML9n/4zmm1PMhzZHcEA72ZAq0tKCxpz10djg5v2qL5V+Oaz8TtTOZbPsxpiKMQ== ------END CERTIFICATE----- -` - - minimalCert = `-----BEGIN CERTIFICATE----- -MIIDGTCCAgECCQCqLd75YLi2kDANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJG -UjETMBEGA1UECAwKU29tZS1TdGF0ZTERMA8GA1UEBwwIVG91bG91c2UxITAfBgNV -BAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0xODA3MTgwODI4MTZaFw0x 
-ODA4MTcwODI4MTZaMEUxCzAJBgNVBAYTAkZSMRMwEQYDVQQIDApTb21lLVN0YXRl -MSEwHwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC/+frDMMTLQyXG34F68BPhQq0kzK4LIq9Y0/gl -FjySZNn1C0QDWA1ubVCAcA6yY204I9cxcQDPNrhC7JlS5QA8Y5rhIBrqQlzZizAi -Rj3NTrRjtGUtOScnHuJaWjLy03DWD+aMwb7q718xt5SEABmmUvLwQK+EjW2MeDwj -y8/UEIpvrRDmdhGaqv7IFpIDkcIF7FowJ/hwDvx3PMc+z/JWK0ovzpvgbx69AVbw -ZxCimeha65rOqVi+lEetD26le+WnOdYsdJ2IkmpPNTXGdfb15xuAc+gFXfMCh7Iw -3Ynl6dZtZM/Ok2kiA7/OsmVnRKkWrtBfGYkI9HcNGb3zrk6nAgMBAAEwDQYJKoZI -hvcNAQELBQADggEBAC/R+Yvhh1VUhcbK49olWsk/JKqfS3VIDQYZg1Eo+JCPbwgS -I1BSYVfMcGzuJTX6ua3m/AHzGF3Tap4GhF4tX12jeIx4R4utnjj7/YKkTvuEM2f4 -xT56YqI7zalGScIB0iMeyNz1QcimRl+M/49au8ow9hNX8C2tcA2cwd/9OIj/6T8q -SBRHc6ojvbqZSJCO0jziGDT1L3D+EDgTjED4nd77v/NRdP+egb0q3P0s4dnQ/5AV -aQlQADUn61j3ScbGJ4NSeZFFvsl38jeRi/MEzp0bGgNBcPj6JHi7qbbauZcZfQ05 -jECvgAY7Nfd9mZ1KtyNaW31is+kag7NsvjxU/kM= ------END CERTIFICATE-----` -) - -func getCleanCertContents(certContents []string) string { - var re = regexp.MustCompile("-----BEGIN CERTIFICATE-----(?s)(.*)") - - var cleanedCertContent []string - for _, certContent := range certContents { - cert := re.FindString(string(certContent)) - cleanedCertContent = append(cleanedCertContent, sanitize([]byte(cert))) - } - - return strings.Join(cleanedCertContent, ",") -} - -func getCertificate(certContent string) *x509.Certificate { - roots := x509.NewCertPool() - ok := roots.AppendCertsFromPEM([]byte(signingCA)) - if !ok { - panic("failed to parse root certificate") - } - - block, _ := pem.Decode([]byte(certContent)) - if block == nil { - panic("failed to parse certificate PEM") - } - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - panic("failed to parse certificate: " + err.Error()) - } - - return cert -} - -func buildTLSWith(certContents []string) *tls.ConnectionState { - var peerCertificates []*x509.Certificate - - for _, certContent := range certContents { - peerCertificates = append(peerCertificates, getCertificate(certContent)) - } - - return &tls.ConnectionState{PeerCertificates: peerCertificates} -} - -var myPassTLSClientCustomHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("bar")) -}) - -func getExpectedSanitized(s string) string { - return url.QueryEscape(strings.Replace(s, "\n", "", -1)) -} - -func TestSanitize(t *testing.T) { - testCases := []struct { - desc string - toSanitize []byte - expected string - }{ - { - desc: "Empty", - }, - { - desc: "With a minimal cert", - toSanitize: []byte(minimalCheeseCrt), - expected: getExpectedSanitized(`MIIEQDCCAygCFFRY0OBk/L5Se0IZRj3CMljawL2UMA0GCSqGSIb3DQEBCwUAMIIB -hDETMBEGCgmSJomT8ixkARkWA29yZzEWMBQGCgmSJomT8ixkARkWBmNoZWVzZTEP -MA0GA1UECgwGQ2hlZXNlMREwDwYDVQQKDAhDaGVlc2UgMjEfMB0GA1UECwwWU2lt -cGxlIFNpZ25pbmcgU2VjdGlvbjEhMB8GA1UECwwYU2ltcGxlIFNpZ25pbmcgU2Vj -dGlvbiAyMRowGAYDVQQDDBFTaW1wbGUgU2lnbmluZyBDQTEcMBoGA1UEAwwTU2lt -cGxlIFNpZ25pbmcgQ0EgMjELMAkGA1UEBhMCRlIxCzAJBgNVBAYTAlVTMREwDwYD -VQQHDAhUT1VMT1VTRTENMAsGA1UEBwwETFlPTjEWMBQGA1UECAwNU2lnbmluZyBT -dGF0ZTEYMBYGA1UECAwPU2lnbmluZyBTdGF0ZSAyMSEwHwYJKoZIhvcNAQkBFhJz -aW1wbGVAc2lnbmluZy5jb20xIjAgBgkqhkiG9w0BCQEWE3NpbXBsZTJAc2lnbmlu -Zy5jb20wHhcNMTgxMjA2MTExMDM2WhcNMjEwOTI1MTExMDM2WjAzMQswCQYDVQQG -EwJGUjETMBEGA1UECAwKU29tZS1TdGF0ZTEPMA0GA1UECgwGQ2hlZXNlMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAskX/bUtwFo1gF2BTPNaNcTUMaRFu -FMZozK8IgLjccZ4kZ0R9oFO6Yp8Zl/IvPaf7tE26PI7XP7eHriUdhnQzX7iioDd0 -RZa68waIhAGc+xPzRFrP3b3yj3S2a9Rve3c0K+SCV+EtKAwsxMqQDhoo9PcBfo5B 
-RHfht07uD5MncUcGirwN+/pxHV5xzAGPcc7On0/5L7bq/G+63nhu78zw9XyuLaHC -PM5VbOUvpyIESJHbMMzTdFGL8ob9VKO+Kr1kVGdEA9i8FLGl3xz/GBKuW/JD0xyW -DrU29mri5vYWHmkuv7ZWHGXnpXjTtPHwveE9/0/ArnmpMyR9JtqFr1oEvQIDAQAB -MA0GCSqGSIb3DQEBCwUAA4IBAQBHta+NWXI08UHeOkGzOTGRiWXsOH2dqdX6gTe9 -xF1AIjyoQ0gvpoGVvlnChSzmlUj+vnx/nOYGIt1poE3hZA3ZHZD/awsvGyp3GwWD -IfXrEViSCIyF+8tNNKYyUcEO3xdAsAUGgfUwwF/mZ6MBV5+A/ZEEILlTq8zFt9dV -vdKzIt7fZYxYBBHFSarl1x8pDgWXlf3hAufevGJXip9xGYmznF0T5cq1RbWJ4be3 -/9K7yuWhuBYC3sbTbCneHBa91M82za+PIISc1ygCYtWSBoZKSAqLk0rkZpHaekDP -WqeUSNGYV//RunTeuRDAf5OxehERb1srzBXhRZ3cZdzXbgR/`), - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - assert.Equal(t, test.expected, sanitize(test.toSanitize), "The sanitized certificates should be equal") - }) - } - -} - -func TestTlsClientheadersWithPEM(t *testing.T) { - testCases := []struct { - desc string - certContents []string // set the request TLS attribute if defined - tlsClientCertHeaders *types.TLSClientHeaders - expectedHeader string - }{ - { - desc: "No TLS, no option", - }, - { - desc: "TLS, no option", - certContents: []string{minimalCheeseCrt}, - }, - { - desc: "No TLS, with pem option true", - tlsClientCertHeaders: &types.TLSClientHeaders{PEM: true}, - }, - { - desc: "TLS with simple certificate, with pem option true", - certContents: []string{minimalCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{PEM: true}, - expectedHeader: getCleanCertContents([]string{minimalCheeseCrt}), - }, - { - desc: "TLS with complete certificate, with pem option true", - certContents: []string{completeCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{PEM: true}, - expectedHeader: getCleanCertContents([]string{completeCheeseCrt}), - }, - { - desc: "TLS with two certificate, with pem option true", - certContents: []string{minimalCheeseCrt, completeCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{PEM: true}, - expectedHeader: getCleanCertContents([]string{minimalCheeseCrt, completeCheeseCrt}), - }, - } - - for _, test := range testCases { - tlsClientHeaders := NewTLSClientHeaders(&types.Frontend{PassTLSClientCert: test.tlsClientCertHeaders}) - - res := httptest.NewRecorder() - req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil) - - if test.certContents != nil && len(test.certContents) > 0 { - req.TLS = buildTLSWith(test.certContents) - } - - tlsClientHeaders.ServeHTTP(res, req, myPassTLSClientCustomHandler) - - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - require.Equal(t, http.StatusOK, res.Code, "Http Status should be OK") - require.Equal(t, "bar", res.Body.String(), "Should be the expected body") - - if test.expectedHeader != "" { - assert.Equal(t, test.expectedHeader, req.Header.Get(xForwardedTLSClientCert), "The request header should contain the cleaned certificate") - } else { - assert.Empty(t, req.Header.Get(xForwardedTLSClientCert)) - } - assert.Empty(t, res.Header().Get(xForwardedTLSClientCert), "The response header should be always empty") - }) - } - -} - -func TestGetSans(t *testing.T) { - urlFoo, err := url.Parse("my.foo.com") - require.NoError(t, err) - urlBar, err := url.Parse("my.bar.com") - require.NoError(t, err) - - testCases := []struct { - desc string - cert *x509.Certificate // set the request TLS attribute if defined - expected []string - }{ - { - desc: "With nil", - }, - { - desc: "Certificate without Sans", - cert: &x509.Certificate{}, - }, - { - desc: "Certificate with all Sans", - cert: 
&x509.Certificate{ - DNSNames: []string{"foo", "bar"}, - EmailAddresses: []string{"test@test.com", "test2@test.com"}, - IPAddresses: []net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2)}, - URIs: []*url.URL{urlFoo, urlBar}, - }, - expected: []string{"foo", "bar", "test@test.com", "test2@test.com", "10.0.0.1", "10.0.0.2", urlFoo.String(), urlBar.String()}, - }, - } - - for _, test := range testCases { - sans := getSANs(test.cert) - test := test - - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - if len(test.expected) > 0 { - for i, expected := range test.expected { - assert.Equal(t, expected, sans[i]) - } - } else { - assert.Empty(t, sans) - } - }) - } - -} - -func TestTlsClientheadersWithCertInfos(t *testing.T) { - minimalCheeseCertAllInfos := `Subject="C=FR,ST=Some-State,O=Cheese",Issuer="DC=org,DC=cheese,C=FR,C=US,ST=Signing State,ST=Signing State 2,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=Simple Signing CA 2",NB=1544094636,NA=1632568236,SAN=` - completeCertAllInfos := `Subject="DC=org,DC=cheese,C=FR,C=US,ST=Cheese org state,ST=Cheese com state,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=*.cheese.com",Issuer="DC=org,DC=cheese,C=FR,C=US,ST=Signing State,ST=Signing State 2,L=TOULOUSE,L=LYON,O=Cheese,O=Cheese 2,CN=Simple Signing CA 2",NB=1544094616,NA=1607166616,SAN=*.cheese.org,*.cheese.net,*.cheese.com,test@cheese.org,test@cheese.net,10.0.1.0,10.0.1.2` - - testCases := []struct { - desc string - certContents []string // set the request TLS attribute if defined - tlsClientCertHeaders *types.TLSClientHeaders - expectedHeader string - }{ - { - desc: "No TLS, no option", - }, - { - desc: "TLS, no option", - certContents: []string{minimalCert}, - }, - { - desc: "No TLS, with pem option true", - tlsClientCertHeaders: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Organization: true, - Locality: true, - Province: true, - Country: true, - SerialNumber: true, - }, - }, - }, - }, - { - desc: "No TLS, with pem option true with no flag", - tlsClientCertHeaders: &types.TLSClientHeaders{ - PEM: false, - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{}, - }, - }, - }, - { - desc: "TLS with simple certificate, with all infos", - certContents: []string{minimalCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - NotBefore: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Sans: true, - }, - }, - expectedHeader: url.QueryEscape(minimalCheeseCertAllInfos), - }, - { - desc: "TLS with simple certificate, with some infos", - certContents: []string{minimalCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - Organization: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - Country: true, - }, - Sans: true, - }, - }, - expectedHeader: url.QueryEscape(`Subject="O=Cheese",Issuer="C=FR,C=US",NA=1632568236,SAN=`), - }, - { - desc: "TLS with complete certificate, with all infos", - certContents: []string{completeCheeseCrt}, - tlsClientCertHeaders: 
&types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - NotBefore: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Sans: true, - }, - }, - expectedHeader: url.QueryEscape(completeCertAllInfos), - }, - { - desc: "TLS with 2 certificates, with all infos", - certContents: []string{minimalCheeseCrt, completeCheeseCrt}, - tlsClientCertHeaders: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - NotBefore: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Sans: true, - }, - }, - expectedHeader: url.QueryEscape(strings.Join([]string{minimalCheeseCertAllInfos, completeCertAllInfos}, ";")), - }, - } - for _, test := range testCases { - tlsClientHeaders := NewTLSClientHeaders(&types.Frontend{PassTLSClientCert: test.tlsClientCertHeaders}) - - res := httptest.NewRecorder() - req := testhelpers.MustNewRequest(http.MethodGet, "http://example.com/foo", nil) - - if test.certContents != nil && len(test.certContents) > 0 { - req.TLS = buildTLSWith(test.certContents) - } - - tlsClientHeaders.ServeHTTP(res, req, myPassTLSClientCustomHandler) - - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - require.Equal(t, http.StatusOK, res.Code, "Http Status should be OK") - require.Equal(t, "bar", res.Body.String(), "Should be the expected body") - - if test.expectedHeader != "" { - expected, err := url.QueryUnescape(test.expectedHeader) - require.NoError(t, err) - - actual, err2 := url.QueryUnescape(req.Header.Get(xForwardedTLSClientCertInfos)) - require.NoError(t, err2) - - require.Equal(t, expected, actual, "The request header should contain the cleaned certificate") - } else { - require.Empty(t, req.Header.Get(xForwardedTLSClientCertInfos)) - } - require.Empty(t, res.Header().Get(xForwardedTLSClientCertInfos), "The response header should be always empty") - }) - } - -} - -func TestNewTLSClientHeadersFromStruct(t *testing.T) { - testCases := []struct { - desc string - frontend *types.Frontend - expected *TLSClientHeaders - }{ - { - desc: "Without frontend", - }, - { - desc: "frontend without the option", - frontend: &types.Frontend{}, - expected: &TLSClientHeaders{}, - }, - { - desc: "frontend with the pem set false", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: false, - }, - }, - expected: &TLSClientHeaders{PEM: false}, - }, - { - desc: "frontend with the pem set true", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - }, - }, - expected: &TLSClientHeaders{PEM: true}, - }, - { - desc: "frontend with the Infos with no flag", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: false, - NotBefore: false, - Sans: false, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: 
&TLSClientCertificateInfos{}, - }, - }, - { - desc: "frontend with the Infos basic", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - NotBefore: true, - Sans: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - NotBefore: true, - NotAfter: true, - Sans: true, - }, - }, - }, - { - desc: "frontend with the Infos NotAfter", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - NotAfter: true, - }, - }, - }, - { - desc: "frontend with the Infos NotBefore", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - NotBefore: true, - }, - }, - }, - { - desc: "frontend with the Infos Sans", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Sans: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Sans: true, - }, - }, - }, - { - desc: "frontend with the Infos Subject Organization", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Organization: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - OrganizationName: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Subject Country", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Country: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - CountryName: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Subject SerialNumber", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - SerialNumber: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - SerialNumber: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Subject Province", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Province: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - StateOrProvinceName: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Subject Locality", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Locality: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - LocalityName: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Subject CommonName", - frontend: 
&types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Subject: &DistinguishedNameOptions{ - CommonName: true, - }, - }, - }, - }, - { - desc: "frontend with the Infos Issuer", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - SerialNumber: true, - Province: true, - }, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Issuer: &DistinguishedNameOptions{ - CommonName: true, - CountryName: true, - DomainComponent: true, - LocalityName: true, - OrganizationName: true, - SerialNumber: true, - StateOrProvinceName: true, - }, - }, - }, - }, - { - desc: "frontend with the Sans Infos", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Sans: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - Sans: true, - }, - }, - }, - { - desc: "frontend with the Infos all", - frontend: &types.Frontend{ - PassTLSClientCert: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - NotBefore: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - SerialNumber: true, - Province: true, - }, - Sans: true, - }, - }, - }, - expected: &TLSClientHeaders{ - PEM: false, - Infos: &TLSClientCertificateInfos{ - NotBefore: true, - NotAfter: true, - Sans: true, - Subject: &DistinguishedNameOptions{ - CountryName: true, - StateOrProvinceName: true, - LocalityName: true, - OrganizationName: true, - CommonName: true, - SerialNumber: true, - }, - Issuer: &DistinguishedNameOptions{ - CountryName: true, - DomainComponent: true, - LocalityName: true, - OrganizationName: true, - SerialNumber: true, - StateOrProvinceName: true, - }, - }}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - require.Equal(t, test.expected, NewTLSClientHeaders(test.frontend)) - }) - } - -} diff --git a/old/middlewares/tracing/carrier.go b/old/middlewares/tracing/carrier.go deleted file mode 100644 index 57f54865d..000000000 --- a/old/middlewares/tracing/carrier.go +++ /dev/null @@ -1,25 +0,0 @@ -package tracing - -import "net/http" - -// HTTPHeadersCarrier custom implementation to fix duplicated headers -// It has been fixed in https://github.com/opentracing/opentracing-go/pull/191 -type HTTPHeadersCarrier http.Header - -// Set conforms to the TextMapWriter interface. -func (c HTTPHeadersCarrier) Set(key, val string) { - h := http.Header(c) - h.Set(key, val) -} - -// ForeachKey conforms to the TextMapReader interface. 
-func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for k, vals := range c { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/old/middlewares/tracing/datadog/datadog.go b/old/middlewares/tracing/datadog/datadog.go deleted file mode 100644 index 601023691..000000000 --- a/old/middlewares/tracing/datadog/datadog.go +++ /dev/null @@ -1,50 +0,0 @@ -package datadog - -import ( - "io" - "strings" - - "github.com/containous/traefik/old/log" - "github.com/opentracing/opentracing-go" - ddtracer "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/opentracer" - datadog "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer" -) - -// Name sets the name of this tracer -const Name = "datadog" - -// Config provides configuration settings for a datadog tracer -type Config struct { - LocalAgentHostPort string `description:"Set datadog-agent's host:port that the reporter will used. Defaults to localhost:8126" export:"false"` - GlobalTag string `description:"Key:Value tag to be set on all the spans." export:"true"` - Debug bool `description:"Enable DataDog debug." export:"true"` - PrioritySampling bool `description:"Enable priority sampling. When using distributed tracing, this option must be enabled in order to get all the parts of a distributed trace sampled."` -} - -// Setup sets up the tracer -func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error) { - tag := strings.SplitN(c.GlobalTag, ":", 2) - - value := "" - if len(tag) == 2 { - value = tag[1] - } - - opts := []datadog.StartOption{ - datadog.WithAgentAddr(c.LocalAgentHostPort), - datadog.WithServiceName(serviceName), - datadog.WithGlobalTag(tag[0], value), - datadog.WithDebugMode(c.Debug), - } - if c.PrioritySampling { - opts = append(opts, datadog.WithPrioritySampling()) - } - tracer := ddtracer.New(opts...) 
- - // Without this, child spans are getting the NOOP tracer - opentracing.SetGlobalTracer(tracer) - - log.Debug("DataDog tracer configured") - - return tracer, nil, nil -} diff --git a/old/middlewares/tracing/entrypoint.go b/old/middlewares/tracing/entrypoint.go deleted file mode 100644 index 40d090a3a..000000000 --- a/old/middlewares/tracing/entrypoint.go +++ /dev/null @@ -1,57 +0,0 @@ -package tracing - -import ( - "fmt" - "net/http" - - "github.com/containous/traefik/old/log" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/urfave/negroni" -) - -type entryPointMiddleware struct { - entryPoint string - *Tracing -} - -// NewEntryPoint creates a new middleware that the incoming request -func (t *Tracing) NewEntryPoint(name string) negroni.Handler { - log.Debug("Added entrypoint tracing middleware") - return &entryPointMiddleware{Tracing: t, entryPoint: name} -} - -func (e *entryPointMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - opNameFunc := generateEntryPointSpanName - - ctx, _ := e.Extract(opentracing.HTTPHeaders, HTTPHeadersCarrier(r.Header)) - span := e.StartSpan(opNameFunc(r, e.entryPoint, e.SpanNameLimit), ext.RPCServerOption(ctx)) - ext.Component.Set(span, e.ServiceName) - LogRequest(span, r) - ext.SpanKindRPCServer.Set(span) - - r = r.WithContext(opentracing.ContextWithSpan(r.Context(), span)) - - recorder := newStatusCodeRecoder(w, 200) - next(recorder, r) - - LogResponseCode(span, recorder.Status()) - span.Finish() -} - -// generateEntryPointSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. If needed, it will be truncated, but will not be less than 24 characters. -func generateEntryPointSpanName(r *http.Request, entryPoint string, spanLimit int) string { - name := fmt.Sprintf("Entrypoint %s %s", entryPoint, r.Host) - - if spanLimit > 0 && len(name) > spanLimit { - if spanLimit < EntryPointMaxLengthNumber { - log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", EntryPointMaxLengthNumber) - spanLimit = EntryPointMaxLengthNumber + 3 - } - hash := computeHash(name) - limit := (spanLimit - EntryPointMaxLengthNumber) / 2 - name = fmt.Sprintf("Entrypoint %s %s %s", truncateString(entryPoint, limit), truncateString(r.Host, limit), hash) - } - - return name -} diff --git a/old/middlewares/tracing/entrypoint_test.go b/old/middlewares/tracing/entrypoint_test.go deleted file mode 100644 index 865bcfc09..000000000 --- a/old/middlewares/tracing/entrypoint_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package tracing - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/opentracing/opentracing-go/ext" - "github.com/stretchr/testify/assert" -) - -func TestEntryPointMiddlewareServeHTTP(t *testing.T) { - expectedTags := map[string]interface{}{ - "span.kind": ext.SpanKindRPCServerEnum, - "http.method": "GET", - "component": "", - "http.url": "http://www.test.com", - "http.host": "www.test.com", - } - - testCases := []struct { - desc string - entryPoint string - tracing *Tracing - expectedTags map[string]interface{} - expectedName string - }{ - { - desc: "no truncation test", - entryPoint: "test", - tracing: &Tracing{ - SpanNameLimit: 0, - tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, - }, - expectedTags: expectedTags, - expectedName: "Entrypoint test www.test.com", - }, { - desc: "basic test", - entryPoint: "test", - tracing: &Tracing{ - SpanNameLimit: 25, - 
tracer: &MockTracer{Span: &MockSpan{Tags: make(map[string]interface{})}}, - }, - expectedTags: expectedTags, - expectedName: "Entrypoint te... ww... 39b97e58", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - e := &entryPointMiddleware{ - entryPoint: test.entryPoint, - Tracing: test.tracing, - } - - next := func(http.ResponseWriter, *http.Request) { - span := test.tracing.tracer.(*MockTracer).Span - - actual := span.Tags - assert.Equal(t, test.expectedTags, actual) - assert.Equal(t, test.expectedName, span.OpName) - } - - e.ServeHTTP(httptest.NewRecorder(), httptest.NewRequest(http.MethodGet, "http://www.test.com", nil), next) - }) - } -} diff --git a/old/middlewares/tracing/forwarder.go b/old/middlewares/tracing/forwarder.go deleted file mode 100644 index d2ff48583..000000000 --- a/old/middlewares/tracing/forwarder.go +++ /dev/null @@ -1,63 +0,0 @@ -package tracing - -import ( - "fmt" - "net/http" - - "github.com/containous/traefik/old/log" - "github.com/opentracing/opentracing-go/ext" - "github.com/urfave/negroni" -) - -type forwarderMiddleware struct { - frontend string - backend string - opName string - *Tracing -} - -// NewForwarderMiddleware creates a new forwarder middleware that traces the outgoing request -func (t *Tracing) NewForwarderMiddleware(frontend, backend string) negroni.Handler { - log.Debugf("Added outgoing tracing middleware %s", frontend) - return &forwarderMiddleware{ - Tracing: t, - frontend: frontend, - backend: backend, - opName: generateForwardSpanName(frontend, backend, t.SpanNameLimit), - } -} - -func (f *forwarderMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - span, r, finish := StartSpan(r, f.opName, true) - defer finish() - span.SetTag("frontend.name", f.frontend) - span.SetTag("backend.name", f.backend) - ext.HTTPMethod.Set(span, r.Method) - ext.HTTPUrl.Set(span, fmt.Sprintf("%s%s", r.URL.String(), r.RequestURI)) - span.SetTag("http.host", r.Host) - - InjectRequestHeaders(r) - - recorder := newStatusCodeRecoder(w, 200) - - next(recorder, r) - - LogResponseCode(span, recorder.Status()) -} - -// generateForwardSpanName will return a Span name of an appropriate lenth based on the 'spanLimit' argument. 
If needed, it will be truncated, but will not be less than 21 characters -func generateForwardSpanName(frontend, backend string, spanLimit int) string { - name := fmt.Sprintf("forward %s/%s", frontend, backend) - - if spanLimit > 0 && len(name) > spanLimit { - if spanLimit < ForwardMaxLengthNumber { - log.Warnf("SpanNameLimit is set to be less than required static number of characters, defaulting to %d + 3", ForwardMaxLengthNumber) - spanLimit = ForwardMaxLengthNumber + 3 - } - hash := computeHash(name) - limit := (spanLimit - ForwardMaxLengthNumber) / 2 - name = fmt.Sprintf("forward %s/%s/%s", truncateString(frontend, limit), truncateString(backend, limit), hash) - } - - return name -} diff --git a/old/middlewares/tracing/forwarder_test.go b/old/middlewares/tracing/forwarder_test.go deleted file mode 100644 index 00c90c293..000000000 --- a/old/middlewares/tracing/forwarder_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package tracing - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestTracingNewForwarderMiddleware(t *testing.T) { - testCases := []struct { - desc string - tracer *Tracing - frontend string - backend string - expected *forwarderMiddleware - }{ - { - desc: "Simple Forward Tracer without truncation and hashing", - tracer: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service.domain.tld", - backend: "some-service.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service.domain.tld", - backend: "some-service.domain.tld", - opName: "forward some-service.domain.tld/some-service.domain.tld", - }, - }, { - desc: "Simple Forward Tracer with truncation and hashing", - tracer: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service-100.slug.namespace.environment.domain.tld", - backend: "some-service-100.slug.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service-100.slug.namespace.environment.domain.tld", - backend: "some-service-100.slug.namespace.environment.domain.tld", - opName: "forward some-service-100.slug.namespace.enviro.../some-service-100.slug.namespace.enviro.../bc4a0d48", - }, - }, - { - desc: "Exactly 101 chars", - tracer: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service1.namespace.environment.domain.tld", - backend: "some-service1.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service1.namespace.environment.domain.tld", - backend: "some-service1.namespace.environment.domain.tld", - opName: "forward some-service1.namespace.environment.domain.tld/some-service1.namespace.environment.domain.tld", - }, - }, - { - desc: "More than 101 chars", - tracer: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service1.frontend.namespace.environment.domain.tld", - backend: "some-service1.backend.namespace.environment.domain.tld", - expected: &forwarderMiddleware{ - Tracing: &Tracing{ - SpanNameLimit: 101, - }, - frontend: "some-service1.frontend.namespace.environment.domain.tld", - backend: "some-service1.backend.namespace.environment.domain.tld", - opName: "forward some-service1.frontend.namespace.envir.../some-service1.backend.namespace.enviro.../fa49dd23", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.tracer.NewForwarderMiddleware(test.frontend, test.backend) - - assert.Equal(t, test.expected, 
actual) - assert.True(t, len(test.expected.opName) <= test.tracer.SpanNameLimit) - }) - } -} diff --git a/old/middlewares/tracing/jaeger/jaeger.go b/old/middlewares/tracing/jaeger/jaeger.go deleted file mode 100644 index 77ee85117..000000000 --- a/old/middlewares/tracing/jaeger/jaeger.go +++ /dev/null @@ -1,73 +0,0 @@ -package jaeger - -import ( - "fmt" - "io" - - "github.com/containous/traefik/old/log" - "github.com/opentracing/opentracing-go" - jaegercfg "github.com/uber/jaeger-client-go/config" - "github.com/uber/jaeger-client-go/zipkin" - jaegermet "github.com/uber/jaeger-lib/metrics" -) - -// Name sets the name of this tracer -const Name = "jaeger" - -// Config provides configuration settings for a jaeger tracer -type Config struct { - SamplingServerURL string `description:"set the sampling server url." export:"false"` - SamplingType string `description:"set the sampling type." export:"true"` - SamplingParam float64 `description:"set the sampling parameter." export:"true"` - LocalAgentHostPort string `description:"set jaeger-agent's host:port that the reporter will used." export:"false"` - Gen128Bit bool `description:"generate 128 bit span IDs." export:"true"` - Propagation string `description:"which propgation format to use (jaeger/b3)." export:"true"` -} - -// Setup sets up the tracer -func (c *Config) Setup(componentName string) (opentracing.Tracer, io.Closer, error) { - jcfg := jaegercfg.Configuration{ - Sampler: &jaegercfg.SamplerConfig{ - SamplingServerURL: c.SamplingServerURL, - Type: c.SamplingType, - Param: c.SamplingParam, - }, - Reporter: &jaegercfg.ReporterConfig{ - LogSpans: true, - LocalAgentHostPort: c.LocalAgentHostPort, - }, - } - - jMetricsFactory := jaegermet.NullFactory - - opts := []jaegercfg.Option{ - jaegercfg.Logger(&jaegerLogger{}), - jaegercfg.Metrics(jMetricsFactory), - jaegercfg.Gen128Bit(c.Gen128Bit), - } - - switch c.Propagation { - case "b3": - p := zipkin.NewZipkinB3HTTPHeaderPropagator() - opts = append(opts, - jaegercfg.Injector(opentracing.HTTPHeaders, p), - jaegercfg.Extractor(opentracing.HTTPHeaders, p), - ) - case "jaeger", "": - default: - return nil, nil, fmt.Errorf("unknown propagation format: %s", c.Propagation) - } - - // Initialize tracer with a logger and a metrics factory - closer, err := jcfg.InitGlobalTracer( - componentName, - opts..., - ) - if err != nil { - log.Warnf("Could not initialize jaeger tracer: %s", err.Error()) - return nil, nil, err - } - log.Debug("Jaeger tracer configured") - - return opentracing.GlobalTracer(), closer, nil -} diff --git a/old/middlewares/tracing/jaeger/logger.go b/old/middlewares/tracing/jaeger/logger.go deleted file mode 100644 index 847c1e669..000000000 --- a/old/middlewares/tracing/jaeger/logger.go +++ /dev/null @@ -1,15 +0,0 @@ -package jaeger - -import "github.com/containous/traefik/old/log" - -// jaegerLogger is an implementation of the Logger interface that delegates to traefik log -type jaegerLogger struct{} - -func (l *jaegerLogger) Error(msg string) { - log.Errorf("Tracing jaeger error: %s", msg) -} - -// Infof logs a message at debug priority -func (l *jaegerLogger) Infof(msg string, args ...interface{}) { - log.Debugf(msg, args...) 
-} diff --git a/old/middlewares/tracing/status_code.go b/old/middlewares/tracing/status_code.go deleted file mode 100644 index ec1802467..000000000 --- a/old/middlewares/tracing/status_code.go +++ /dev/null @@ -1,57 +0,0 @@ -package tracing - -import ( - "bufio" - "net" - "net/http" -) - -type statusCodeRecoder interface { - http.ResponseWriter - Status() int -} - -type statusCodeWithoutCloseNotify struct { - http.ResponseWriter - status int -} - -// WriteHeader captures the status code for later retrieval. -func (s *statusCodeWithoutCloseNotify) WriteHeader(status int) { - s.status = status - s.ResponseWriter.WriteHeader(status) -} - -// Status get response status -func (s *statusCodeWithoutCloseNotify) Status() int { - return s.status -} - -// Hijack hijacks the connection -func (s *statusCodeWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return s.ResponseWriter.(http.Hijacker).Hijack() -} - -// Flush sends any buffered data to the client. -func (s *statusCodeWithoutCloseNotify) Flush() { - if flusher, ok := s.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -type statusCodeWithCloseNotify struct { - *statusCodeWithoutCloseNotify -} - -func (s *statusCodeWithCloseNotify) CloseNotify() <-chan bool { - return s.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -// newStatusCodeRecoder returns an initialized statusCodeRecoder. -func newStatusCodeRecoder(rw http.ResponseWriter, status int) statusCodeRecoder { - recorder := &statusCodeWithoutCloseNotify{rw, status} - if _, ok := rw.(http.CloseNotifier); ok { - return &statusCodeWithCloseNotify{recorder} - } - return recorder -} diff --git a/old/middlewares/tracing/tracing.go b/old/middlewares/tracing/tracing.go deleted file mode 100644 index 8f3380ec9..000000000 --- a/old/middlewares/tracing/tracing.go +++ /dev/null @@ -1,197 +0,0 @@ -package tracing - -import ( - "crypto/sha256" - "fmt" - "io" - "net/http" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/middlewares/tracing/datadog" - "github.com/containous/traefik/old/middlewares/tracing/jaeger" - "github.com/containous/traefik/old/middlewares/tracing/zipkin" - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -// ForwardMaxLengthNumber defines the number of static characters in the Forwarding Span Trace name : 8 chars for 'forward ' + 8 chars for hash + 2 chars for '_'. -const ForwardMaxLengthNumber = 18 - -// EntryPointMaxLengthNumber defines the number of static characters in the Entrypoint Span Trace name : 11 chars for 'Entrypoint ' + 8 chars for hash + 2 chars for '_'. -const EntryPointMaxLengthNumber = 21 - -// TraceNameHashLength defines the number of characters to use from the head of the generated hash. -const TraceNameHashLength = 8 - -// Tracing middleware -type Tracing struct { - Backend string `description:"Selects the tracking backend ('jaeger','zipkin', 'datadog')." 
export:"true"` - ServiceName string `description:"Set the name for this service" export:"true"` - SpanNameLimit int `description:"Set the maximum character limit for Span names (default 0 = no limit)" export:"true"` - Jaeger *jaeger.Config `description:"Settings for jaeger"` - Zipkin *zipkin.Config `description:"Settings for zipkin"` - DataDog *datadog.Config `description:"Settings for DataDog"` - - tracer opentracing.Tracer - closer io.Closer -} - -// StartSpan delegates to opentracing.Tracer -func (t *Tracing) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { - return t.tracer.StartSpan(operationName, opts...) -} - -// Inject delegates to opentracing.Tracer -func (t *Tracing) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error { - return t.tracer.Inject(sm, format, carrier) -} - -// Extract delegates to opentracing.Tracer -func (t *Tracing) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - return t.tracer.Extract(format, carrier) -} - -// Backend describes things we can use to setup tracing -type Backend interface { - Setup(serviceName string) (opentracing.Tracer, io.Closer, error) -} - -// Setup Tracing middleware -func (t *Tracing) Setup() { - var err error - - switch t.Backend { - case jaeger.Name: - t.tracer, t.closer, err = t.Jaeger.Setup(t.ServiceName) - case zipkin.Name: - t.tracer, t.closer, err = t.Zipkin.Setup(t.ServiceName) - case datadog.Name: - t.tracer, t.closer, err = t.DataDog.Setup(t.ServiceName) - default: - log.Warnf("Unknown tracer %q", t.Backend) - return - } - - if err != nil { - log.Warnf("Could not initialize %s tracing: %v", t.Backend, err) - } -} - -// IsEnabled determines if tracing was successfully activated -func (t *Tracing) IsEnabled() bool { - if t == nil || t.tracer == nil { - return false - } - return true -} - -// Close tracer -func (t *Tracing) Close() { - if t.closer != nil { - err := t.closer.Close() - if err != nil { - log.Warn(err) - } - } -} - -// LogRequest used to create span tags from the request -func LogRequest(span opentracing.Span, r *http.Request) { - if span != nil && r != nil { - ext.HTTPMethod.Set(span, r.Method) - ext.HTTPUrl.Set(span, r.URL.String()) - span.SetTag("http.host", r.Host) - } -} - -// LogResponseCode used to log response code in span -func LogResponseCode(span opentracing.Span, code int) { - if span != nil { - ext.HTTPStatusCode.Set(span, uint16(code)) - if code >= 400 { - ext.Error.Set(span, true) - } - } -} - -// GetSpan used to retrieve span from request context -func GetSpan(r *http.Request) opentracing.Span { - return opentracing.SpanFromContext(r.Context()) -} - -// InjectRequestHeaders used to inject OpenTracing headers into the request -func InjectRequestHeaders(r *http.Request) { - if span := GetSpan(r); span != nil { - err := opentracing.GlobalTracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - HTTPHeadersCarrier(r.Header)) - if err != nil { - log.Error(err) - } - } -} - -// LogEventf logs an event to the span in the request context. 
-func LogEventf(r *http.Request, format string, args ...interface{}) { - if span := GetSpan(r); span != nil { - span.LogKV("event", fmt.Sprintf(format, args...)) - } -} - -// StartSpan starts a new span from the one in the request context -func StartSpan(r *http.Request, operationName string, spanKinClient bool, opts ...opentracing.StartSpanOption) (opentracing.Span, *http.Request, func()) { - span, ctx := opentracing.StartSpanFromContext(r.Context(), operationName, opts...) - if spanKinClient { - ext.SpanKindRPCClient.Set(span) - } - r = r.WithContext(ctx) - return span, r, func() { - span.Finish() - } -} - -// SetError flags the span associated with this request as in error -func SetError(r *http.Request) { - if span := GetSpan(r); span != nil { - ext.Error.Set(span, true) - } -} - -// SetErrorAndDebugLog flags the span associated with this request as in error and create a debug log. -func SetErrorAndDebugLog(r *http.Request, format string, args ...interface{}) { - SetError(r) - log.Debugf(format, args...) - LogEventf(r, format, args...) -} - -// SetErrorAndWarnLog flags the span associated with this request as in error and create a debug log. -func SetErrorAndWarnLog(r *http.Request, format string, args ...interface{}) { - SetError(r) - log.Warnf(format, args...) - LogEventf(r, format, args...) -} - -// truncateString reduces the length of the 'str' argument to 'num' - 3 and adds a '...' suffix to the tail. -func truncateString(str string, num int) string { - text := str - if len(str) > num { - if num > 3 { - num -= 3 - } - text = str[0:num] + "..." - } - return text -} - -// computeHash returns the first TraceNameHashLength character of the sha256 hash for 'name' argument. -func computeHash(name string) string { - data := []byte(name) - hash := sha256.New() - if _, err := hash.Write(data); err != nil { - // Impossible case - log.Errorf("Fail to create Span name hash for %s: %v", name, err) - } - - return fmt.Sprintf("%x", hash.Sum(nil))[:TraceNameHashLength] -} diff --git a/old/middlewares/tracing/tracing_test.go b/old/middlewares/tracing/tracing_test.go deleted file mode 100644 index d4a631312..000000000 --- a/old/middlewares/tracing/tracing_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package tracing - -import ( - "testing" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" - "github.com/stretchr/testify/assert" -) - -type MockTracer struct { - Span *MockSpan -} - -type MockSpan struct { - OpName string - Tags map[string]interface{} -} - -type MockSpanContext struct { -} - -// MockSpanContext: -func (n MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -// MockSpan: -func (n MockSpan) Context() opentracing.SpanContext { return MockSpanContext{} } -func (n MockSpan) SetBaggageItem(key, val string) opentracing.Span { - return MockSpan{Tags: make(map[string]interface{})} -} -func (n MockSpan) BaggageItem(key string) string { return "" } -func (n MockSpan) SetTag(key string, value interface{}) opentracing.Span { - n.Tags[key] = value - return n -} -func (n MockSpan) LogFields(fields ...log.Field) {} -func (n MockSpan) LogKV(keyVals ...interface{}) {} -func (n MockSpan) Finish() {} -func (n MockSpan) FinishWithOptions(opts opentracing.FinishOptions) {} -func (n MockSpan) SetOperationName(operationName string) opentracing.Span { return n } -func (n MockSpan) Tracer() opentracing.Tracer { return MockTracer{} } -func (n MockSpan) LogEvent(event string) {} -func (n MockSpan) LogEventWithPayload(event string, payload interface{}) {} 
-func (n MockSpan) Log(data opentracing.LogData) {} -func (n MockSpan) Reset() { - n.Tags = make(map[string]interface{}) -} - -// StartSpan belongs to the Tracer interface. -func (n MockTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span { - n.Span.OpName = operationName - return n.Span -} - -// Inject belongs to the Tracer interface. -func (n MockTracer) Inject(sp opentracing.SpanContext, format interface{}, carrier interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. -func (n MockTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - return nil, opentracing.ErrSpanContextNotFound -} - -func TestTruncateString(t *testing.T) { - testCases := []struct { - desc string - text string - limit int - expected string - }{ - { - desc: "short text less than limit 10", - text: "short", - limit: 10, - expected: "short", - }, - { - desc: "basic truncate with limit 10", - text: "some very long pice of text", - limit: 10, - expected: "some ve...", - }, - { - desc: "truncate long FQDN to 39 chars", - text: "some-service-100.slug.namespace.environment.domain.tld", - limit: 39, - expected: "some-service-100.slug.namespace.envi...", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := truncateString(test.text, test.limit) - - assert.Equal(t, test.expected, actual) - assert.True(t, len(actual) <= test.limit) - }) - } -} - -func TestComputeHash(t *testing.T) { - testCases := []struct { - desc string - text string - expected string - }{ - { - desc: "hashing", - text: "some very long pice of text", - expected: "0258ea1c", - }, - { - desc: "short text less than limit 10", - text: "short", - expected: "f9b0078b", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := computeHash(test.text) - - assert.Equal(t, test.expected, actual) - }) - } -} diff --git a/old/middlewares/tracing/wrapper.go b/old/middlewares/tracing/wrapper.go deleted file mode 100644 index 8e9c566c1..000000000 --- a/old/middlewares/tracing/wrapper.go +++ /dev/null @@ -1,66 +0,0 @@ -package tracing - -import ( - "net/http" - - "github.com/urfave/negroni" -) - -// NewNegroniHandlerWrapper return a negroni.Handler struct -func (t *Tracing) NewNegroniHandlerWrapper(name string, handler negroni.Handler, clientSpanKind bool) negroni.Handler { - if t.IsEnabled() && handler != nil { - return &NegroniHandlerWrapper{ - name: name, - next: handler, - clientSpanKind: clientSpanKind, - } - } - return handler -} - -// NewHTTPHandlerWrapper return a http.Handler struct -func (t *Tracing) NewHTTPHandlerWrapper(name string, handler http.Handler, clientSpanKind bool) http.Handler { - if t.IsEnabled() && handler != nil { - return &HTTPHandlerWrapper{ - name: name, - handler: handler, - clientSpanKind: clientSpanKind, - } - } - return handler -} - -// NegroniHandlerWrapper is used to wrap negroni handler middleware -type NegroniHandlerWrapper struct { - name string - next negroni.Handler - clientSpanKind bool -} - -func (t *NegroniHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - var finish func() - _, r, finish = StartSpan(r, t.name, t.clientSpanKind) - defer finish() - - if t.next != nil { - t.next.ServeHTTP(rw, r, next) - } -} - -// HTTPHandlerWrapper is used to wrap http handler middleware -type HTTPHandlerWrapper struct { - name string - handler http.Handler 
- clientSpanKind bool -} - -func (t *HTTPHandlerWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - var finish func() - _, r, finish = StartSpan(r, t.name, t.clientSpanKind) - defer finish() - - if t.handler != nil { - t.handler.ServeHTTP(rw, r) - } - -} diff --git a/old/middlewares/tracing/zipkin/zipkin.go b/old/middlewares/tracing/zipkin/zipkin.go deleted file mode 100644 index 79d20b2db..000000000 --- a/old/middlewares/tracing/zipkin/zipkin.go +++ /dev/null @@ -1,49 +0,0 @@ -package zipkin - -import ( - "io" - "time" - - "github.com/containous/traefik/old/log" - "github.com/opentracing/opentracing-go" - zipkin "github.com/openzipkin/zipkin-go-opentracing" -) - -// Name sets the name of this tracer -const Name = "zipkin" - -// Config provides configuration settings for a zipkin tracer -type Config struct { - HTTPEndpoint string `description:"HTTP Endpoint to report traces to." export:"false"` - SameSpan bool `description:"Use Zipkin SameSpan RPC style traces." export:"true"` - ID128Bit bool `description:"Use Zipkin 128 bit root span IDs." export:"true"` - Debug bool `description:"Enable Zipkin debug." export:"true"` - SampleRate float64 `description:"The rate between 0.0 and 1.0 of requests to trace." export:"true"` -} - -// Setup sets up the tracer -func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error) { - collector, err := zipkin.NewHTTPCollector(c.HTTPEndpoint) - if err != nil { - return nil, nil, err - } - recorder := zipkin.NewRecorder(collector, c.Debug, "0.0.0.0:0", serviceName) - tracer, err := zipkin.NewTracer( - recorder, - zipkin.ClientServerSameSpan(c.SameSpan), - zipkin.TraceID128Bit(c.ID128Bit), - zipkin.DebugMode(c.Debug), - zipkin.WithSampler(zipkin.NewBoundarySampler(c.SampleRate, time.Now().Unix())), - ) - - if err != nil { - return nil, nil, err - } - - // Without this, child spans are getting the NOOP tracer - opentracing.SetGlobalTracer(tracer) - - log.Debug("Zipkin tracer configured") - - return tracer, collector, nil -} diff --git a/old/ping/ping.go b/old/ping/ping.go deleted file mode 100644 index 1e7ffa860..000000000 --- a/old/ping/ping.go +++ /dev/null @@ -1,36 +0,0 @@ -package ping - -import ( - "context" - "fmt" - "net/http" - - "github.com/containous/mux" -) - -// Handler expose ping routes -type Handler struct { - EntryPoint string `description:"Ping entryPoint" export:"true"` - terminating bool -} - -// WithContext causes the ping endpoint to serve non 200 responses. -func (h *Handler) WithContext(ctx context.Context) { - go func() { - <-ctx.Done() - h.terminating = true - }() -} - -// AddRoutes add ping routes on a router -func (h *Handler) AddRoutes(router *mux.Router) { - router.Methods(http.MethodGet, http.MethodHead).Path("/ping"). 
- HandlerFunc(func(response http.ResponseWriter, request *http.Request) { - statusCode := http.StatusOK - if h.terminating { - statusCode = http.StatusServiceUnavailable - } - response.WriteHeader(statusCode) - fmt.Fprint(response, http.StatusText(statusCode)) - }) -} diff --git a/old/provider/boltdb/boltdb.go b/old/provider/boltdb/boltdb.go deleted file mode 100644 index 503e73be6..000000000 --- a/old/provider/boltdb/boltdb.go +++ /dev/null @@ -1,48 +0,0 @@ -package boltdb - -import ( - "fmt" - - "github.com/abronan/valkeyrie/store" - "github.com/abronan/valkeyrie/store/boltdb" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/kv" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the provider. -type Provider struct { - kv.Provider `mapstructure:",squash" export:"true"` -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.Provider.Init(constraints) - if err != nil { - return err - } - - store, err := p.CreateStore() - if err != nil { - return fmt.Errorf("failed to Connect to KV store: %v", err) - } - - p.SetKVClient(store) - return nil -} - -// Provide allows the boltdb provider to Provide configurations to traefik -// using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - return p.Provider.Provide(configurationChan, pool) -} - -// CreateStore creates the KV store -func (p *Provider) CreateStore() (store.Store, error) { - p.SetStoreType(store.BOLTDB) - boltdb.Register() - return p.Provider.CreateStore() -} diff --git a/old/provider/consul/consul.go b/old/provider/consul/consul.go deleted file mode 100644 index 5fac431a4..000000000 --- a/old/provider/consul/consul.go +++ /dev/null @@ -1,48 +0,0 @@ -package consul - -import ( - "fmt" - - "github.com/abronan/valkeyrie/store" - "github.com/abronan/valkeyrie/store/consul" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/kv" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the p. -type Provider struct { - kv.Provider `mapstructure:",squash" export:"true"` -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.Provider.Init(constraints) - if err != nil { - return err - } - - store, err := p.CreateStore() - if err != nil { - return fmt.Errorf("failed to Connect to KV store: %v", err) - } - - p.SetKVClient(store) - return nil -} - -// Provide allows the consul provider to provide configurations to traefik -// using the given configuration channel. 
-func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - return p.Provider.Provide(configurationChan, pool) -} - -// CreateStore creates the KV store -func (p *Provider) CreateStore() (store.Store, error) { - p.SetStoreType(store.CONSUL) - consul.Register() - return p.Provider.CreateStore() -} diff --git a/old/provider/consulcatalog/config.go b/old/provider/consulcatalog/config.go deleted file mode 100644 index e6eefac96..000000000 --- a/old/provider/consulcatalog/config.go +++ /dev/null @@ -1,227 +0,0 @@ -package consulcatalog - -import ( - "bytes" - "crypto/sha1" - "encoding/base64" - "fmt" - "net" - "sort" - "strconv" - "strings" - "text/template" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/hashicorp/consul/api" -) - -func (p *Provider) buildConfiguration(catalog []catalogUpdate) *types.Configuration { - var funcMap = template.FuncMap{ - "getAttribute": p.getAttribute, - "getTag": getTag, - "hasTag": hasTag, - - // Backend functions - "getNodeBackendName": getNodeBackendName, - "getServiceBackendName": getServiceBackendName, - "getBackendAddress": getBackendAddress, - "getServerName": getServerName, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getResponseForwarding": label.GetResponseForwarding, - "getServer": p.getServer, - - // Frontend functions - "getFrontendRule": p.getFrontendRule, - "getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated - "getAuth": label.GetAuth, - "getFrontEndEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints), - "getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority), - "getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert), - "getPassTLSClientCert": label.GetTLSClientCert, - "getWhiteList": label.GetWhiteList, - "getRedirect": label.GetRedirect, - "getErrorPages": label.GetErrorPages, - "getRateLimit": label.GetRateLimit, - "getHeaders": label.GetHeaders, - } - - var allNodes []*api.ServiceEntry - var services []*serviceUpdate - for _, info := range catalog { - if len(info.Nodes) > 0 { - services = append(services, p.generateFrontends(info.Service)...) - allNodes = append(allNodes, info.Nodes...) 
- } - } - // Ensure a stable ordering of nodes so that identical configurations may be detected - sort.Sort(nodeSorter(allNodes)) - - templateObjects := struct { - Services []*serviceUpdate - Nodes []*api.ServiceEntry - }{ - Services: services, - Nodes: allNodes, - } - - configuration, err := p.GetConfiguration("templates/consul_catalog.tmpl", funcMap, templateObjects) - if err != nil { - log.WithError(err).Error("Failed to create config") - } - - return configuration -} - -// Specific functions - -func (p *Provider) getFrontendRule(service serviceUpdate) string { - customFrontendRule := label.GetStringValue(service.TraefikLabels, label.TraefikFrontendRule, "") - if customFrontendRule == "" { - customFrontendRule = p.FrontEndRule - } - - tmpl := p.frontEndRuleTemplate - tmpl, err := tmpl.Parse(customFrontendRule) - if err != nil { - log.Errorf("Failed to parse Consul Catalog custom frontend rule: %v", err) - return "" - } - - templateObjects := struct { - ServiceName string - Domain string - Attributes []string - }{ - ServiceName: service.ServiceName, - Domain: p.Domain, - Attributes: service.Attributes, - } - - var buffer bytes.Buffer - err = tmpl.Execute(&buffer, templateObjects) - if err != nil { - log.Errorf("Failed to execute Consul Catalog custom frontend rule template: %v", err) - return "" - } - - return strings.TrimSuffix(buffer.String(), ".") -} - -func (p *Provider) getServer(node *api.ServiceEntry) types.Server { - scheme := p.getAttribute(label.SuffixProtocol, node.Service.Tags, label.DefaultProtocol) - address := getBackendAddress(node) - - return types.Server{ - URL: fmt.Sprintf("%s://%s", scheme, net.JoinHostPort(address, strconv.Itoa(node.Service.Port))), - Weight: p.getWeight(node.Service.Tags), - } -} - -func (p *Provider) setupFrontEndRuleTemplate() { - var FuncMap = template.FuncMap{ - "getAttribute": p.getAttribute, - "getTag": getTag, - "hasTag": hasTag, - } - p.frontEndRuleTemplate = template.New("consul catalog frontend rule").Funcs(FuncMap) -} - -// Specific functions - -func getServiceBackendName(service *serviceUpdate) string { - if service.ParentServiceName != "" { - return strings.ToLower(service.ParentServiceName) - } - return strings.ToLower(service.ServiceName) -} - -func getNodeBackendName(node *api.ServiceEntry) string { - return strings.ToLower(node.Service.Service) -} - -func getBackendAddress(node *api.ServiceEntry) string { - if node.Service.Address != "" { - return node.Service.Address - } - return node.Node.Address -} - -func getServerName(node *api.ServiceEntry, index int) string { - serviceName := node.Service.Service + node.Service.Address + strconv.Itoa(node.Service.Port) - // TODO sort tags ? 
- serviceName += strings.Join(node.Service.Tags, "") - - hash := sha1.New() - _, err := hash.Write([]byte(serviceName)) - if err != nil { - // Impossible case - log.Error(err) - } else { - serviceName = base64.URLEncoding.EncodeToString(hash.Sum(nil)) - } - - // unique int at the end - return provider.Normalize(node.Service.Service + "-" + strconv.Itoa(index) + "-" + serviceName) -} - -func (p *Provider) getWeight(tags []string) int { - labels := tagsToNeutralLabels(tags, p.Prefix) - return label.GetIntValue(labels, p.getPrefixedName(label.SuffixWeight), label.DefaultWeight) -} - -// Base functions - -func (p *Provider) getAttribute(name string, tags []string, defaultValue string) string { - return getTag(p.getPrefixedName(name), tags, defaultValue) -} - -func (p *Provider) getPrefixedName(name string) string { - if len(p.Prefix) > 0 && len(name) > 0 { - return p.Prefix + "." + name - } - return name -} - -func hasTag(name string, tags []string) bool { - lowerName := strings.ToLower(name) - - for _, tag := range tags { - lowerTag := strings.ToLower(tag) - - // Given the nature of Consul tags, which could be either singular markers, or key=value pairs - if strings.HasPrefix(lowerTag, lowerName+"=") || lowerTag == lowerName { - return true - } - } - return false -} - -func getTag(name string, tags []string, defaultValue string) string { - lowerName := strings.ToLower(name) - - for _, tag := range tags { - lowerTag := strings.ToLower(tag) - - // Given the nature of Consul tags, which could be either singular markers, or key=value pairs - if strings.HasPrefix(lowerTag, lowerName+"=") || lowerTag == lowerName { - // In case, where a tag might be a key=value, try to split it by the first '=' - kv := strings.SplitN(tag, "=", 2) - - // If the returned result is a key=value pair, return the 'value' component - if len(kv) == 2 { - return kv[1] - } - // If the returned result is a singular marker, return the 'key' component - return kv[0] - } - } - return defaultValue -} diff --git a/old/provider/consulcatalog/config_test.go b/old/provider/consulcatalog/config_test.go deleted file mode 100644 index 94616d60e..000000000 --- a/old/provider/consulcatalog/config_test.go +++ /dev/null @@ -1,1358 +0,0 @@ -// +build ignore - -package consulcatalog - -import ( - "testing" - "text/template" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/assert" -) - -func TestProviderBuildConfiguration(t *testing.T) { - p := &Provider{ - Domain: "localhost", - Prefix: "traefik", - ExposedByDefault: false, - FrontEndRule: "Host:{{.ServiceName}}.{{.Domain}}", - frontEndRuleTemplate: template.New("consul catalog frontend rule"), - } - - testCases := []struct { - desc string - nodes []catalogUpdate - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "Should build config of nothing", - nodes: []catalogUpdate{}, - expectedFrontends: map[string]*types.Frontend{}, - expectedBackends: map[string]*types.Backend{}, - }, - { - desc: "Should build config with no frontend and backend", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{}, - expectedBackends: map[string]*types.Backend{}, - }, - { - desc: "Should build config who contains one frontend and one backend", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - 
ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.TraefikBackendLoadBalancerMethod + "=drr", - label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5", - label.TraefikBackendMaxConnAmount + "=1000", - label.TraefikBackendMaxConnExtractorFunc + "=client.ip", - label.TraefikFrontendAuthBasicUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:test.localhost", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-ecTTsmX1vPktQQrl53WhNDy-HEg": { - URL: "https://127.0.0.1:80", - Weight: 42, - }, - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - MaxConn: &types.MaxConn{ - Amount: 1000, - ExtractorFunc: "client.ip", - }, - }, - }, - }, - { - desc: "Should build config which contains three frontends and one backend", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.Prefix + "frontend.rule=Host:A", - label.Prefix + "frontends.test1.rule=Host:B", - label.Prefix + "frontends.test2.rule=Host:C", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:A", - }, - }, - EntryPoints: []string{}, - }, - "frontend-test-test1": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test-test1": { - Rule: "Host:B", - }, - }, - EntryPoints: []string{}, - }, - "frontend-test-test2": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test-test2": { - Rule: "Host:C", - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-O0Tnh-SwzY69M6SurTKP3wNKkzI": { - URL: "http://127.0.0.1:80", - Weight: 1, - }, - }, - }, - }, - }, - { - desc: "Should build config with a basic auth with a backward compatibility", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.TraefikFrontendAuthBasicUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: 
&api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:test.localhost", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-ecTTsmX1vPktQQrl53WhNDy-HEg": { - URL: "https://127.0.0.1:80", - Weight: 42, - }, - }, - }, - }, - }, - { - desc: "Should build config with a digest auth", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.TraefikFrontendAuthDigestRemoveHeader + "=true", - label.TraefikFrontendAuthDigestUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthDigestUsersFile + "=.htpasswd", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:test.localhost", - }, - }, - Auth: &types.Auth{ - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-ecTTsmX1vPktQQrl53WhNDy-HEg": { - URL: "https://127.0.0.1:80", - Weight: 42, - }, - }, - }, - }, - }, - { - desc: "Should build config with a forward auth", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.TraefikFrontendAuthForwardAddress + "=auth.server", - label.TraefikFrontendAuthForwardAuthResponseHeaders + "=X-Auth-User,X-Auth-Token", - label.TraefikFrontendAuthForwardTrustForwardHeader + "=true", - label.TraefikFrontendAuthForwardTLSCa + "=ca.crt", - label.TraefikFrontendAuthForwardTLSCaOptional + "=true", - label.TraefikFrontendAuthForwardTLSCert + "=server.crt", - label.TraefikFrontendAuthForwardTLSKey + "=server.key", - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify + "=true", - label.TraefikFrontendAuthHeaderField + "=X-WebAuth-User", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: 
"127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:test.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-ecTTsmX1vPktQQrl53WhNDy-HEg": { - URL: "https://127.0.0.1:80", - Weight: 42, - }, - }, - }, - }, - }, - { - desc: "when all labels are set", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - label.TraefikBackend + "=foobar", - - label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5", - label.TraefikBackendResponseForwardingFlushInterval + "=10ms", - label.TraefikBackendHealthCheckPath + "=/health", - label.TraefikBackendHealthCheckScheme + "=http", - label.TraefikBackendHealthCheckPort + "=880", - label.TraefikBackendHealthCheckInterval + "=6", - label.TraefikBackendHealthCheckTimeout + "=3", - label.TraefikBackendHealthCheckHostname + "=foo.com", - label.TraefikBackendHealthCheckHeaders + "=Foo:bar || Bar:foo", - label.TraefikBackendLoadBalancerMethod + "=drr", - label.TraefikBackendLoadBalancerStickiness + "=true", - label.TraefikBackendLoadBalancerStickinessCookieName + "=chocolate", - label.TraefikBackendMaxConnAmount + "=666", - label.TraefikBackendMaxConnExtractorFunc + "=client.ip", - label.TraefikBackendBufferingMaxResponseBodyBytes + "=10485760", - label.TraefikBackendBufferingMemResponseBodyBytes + "=2097152", - label.TraefikBackendBufferingMaxRequestBodyBytes + "=10485760", - label.TraefikBackendBufferingMemRequestBodyBytes + "=2097152", - label.TraefikBackendBufferingRetryExpression + "=IsNetworkError() && Attempts() <= 2", - - label.TraefikFrontendPassTLSClientCertPem + "=true", - label.TraefikFrontendPassTLSClientCertInfosNotBefore + "=true", - label.TraefikFrontendPassTLSClientCertInfosNotAfter + "=true", - label.TraefikFrontendPassTLSClientCertInfosSans + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerCountry + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerLocality + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerProvince + "=true", - label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectCountry + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectLocality + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectProvince + "=true", - label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber + "=true", - - label.TraefikFrontendAuthBasic + 
"=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthBasicRemoveHeader + "=true", - label.TraefikFrontendAuthBasicUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthBasicUsersFile + "=.htpasswd", - label.TraefikFrontendAuthDigestRemoveHeader + "=true", - label.TraefikFrontendAuthDigestUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthDigestUsersFile + "=.htpasswd", - label.TraefikFrontendAuthForwardAddress + "=auth.server", - label.TraefikFrontendAuthForwardAuthResponseHeaders + "=X-Auth-User,X-Auth-Token", - label.TraefikFrontendAuthForwardTrustForwardHeader + "=true", - label.TraefikFrontendAuthForwardTLSCa + "=ca.crt", - label.TraefikFrontendAuthForwardTLSCaOptional + "=true", - label.TraefikFrontendAuthForwardTLSCert + "=server.crt", - label.TraefikFrontendAuthForwardTLSKey + "=server.key", - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify + "=true", - label.TraefikFrontendAuthHeaderField + "=X-WebAuth-User", - - label.TraefikFrontendEntryPoints + "=http,https", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikFrontendPassTLSCert + "=true", - label.TraefikFrontendPriority + "=666", - label.TraefikFrontendRedirectEntryPoint + "=https", - label.TraefikFrontendRedirectRegex + "=nope", - label.TraefikFrontendRedirectReplacement + "=nope", - label.TraefikFrontendRedirectPermanent + "=true", - label.TraefikFrontendRule + "=Host:traefik.io", - label.TraefikFrontendWhiteListSourceRange + "=10.10.10.10", - label.TraefikFrontendWhiteListIPStrategyExcludedIPS + "=10.10.10.10,10.10.10.11", - label.TraefikFrontendWhiteListIPStrategyDepth + "=5", - - label.TraefikFrontendRequestHeaders + "=Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendResponseHeaders + "=Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendSSLProxyHeaders + "=Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendAllowedHosts + "=foo,bar,bor", - label.TraefikFrontendHostsProxyHeaders + "=foo,bar,bor", - label.TraefikFrontendSSLHost + "=foo", - label.TraefikFrontendCustomFrameOptionsValue + "=foo", - label.TraefikFrontendContentSecurityPolicy + "=foo", - label.TraefikFrontendPublicKey + "=foo", - label.TraefikFrontendReferrerPolicy + "=foo", - label.TraefikFrontendCustomBrowserXSSValue + "=foo", - label.TraefikFrontendSTSSeconds + "=666", - label.TraefikFrontendSSLForceHost + "=true", - label.TraefikFrontendSSLRedirect + "=true", - label.TraefikFrontendSSLTemporaryRedirect + "=true", - label.TraefikFrontendSTSIncludeSubdomains + "=true", - label.TraefikFrontendSTSPreload + "=true", - label.TraefikFrontendForceSTSHeader + "=true", - label.TraefikFrontendFrameDeny + "=true", - label.TraefikFrontendContentTypeNosniff + "=true", - label.TraefikFrontendBrowserXSSFilter + "=true", - label.TraefikFrontendIsDevelopment + "=true", - - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus + "=404", - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend + "=foobar", - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery + "=foo_query", - label.Prefix + label.BaseFrontendErrorPage + "bar." 
+ label.SuffixErrorPageStatus + "=500,600", - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend + "=foobar", - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery + "=bar_query", - - label.TraefikFrontendRateLimitExtractorFunc + "=client.ip", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod + "=6", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage + "=12", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst + "=18", - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod + "=3", - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage + "=6", - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitBurst + "=9", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "10.0.0.1", - Port: 80, - Tags: []string{ - label.TraefikProtocol + "=https", - label.TraefikWeight + "=12", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - { - Service: &api.AgentService{ - Service: "test", - Address: "10.0.0.2", - Port: 80, - Tags: []string{ - label.TraefikProtocol + "=https", - label.TraefikWeight + "=12", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-test", - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"}, - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - 
CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "backend-foobar", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "backend-foobar", - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-N753CZ-JEP1SmRf5Wfe6S3-RuM": { - URL: "https://10.0.0.1:80", - Weight: 12, - }, - "test-1-u4RAIw2K4-PDJh41dqqB4kM2wy0": { - URL: "https://10.0.0.2:80", - Weight: 12, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - ResponseForwarding: &types.ResponseForwarding{ - FlushInterval: "10ms", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - }, - { - desc: "Should build config containing one frontend, one IPv4 and one IPv6 backend", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - label.TraefikBackendLoadBalancerMethod + "=drr", - label.TraefikBackendCircuitBreakerExpression + "=NetworkErrorRatio() > 0.5", - label.TraefikBackendMaxConnAmount + "=1000", - label.TraefikBackendMaxConnExtractorFunc + "=client.ip", - label.TraefikFrontendAuthBasicUsers + "=test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - { - Service: &api.AgentService{ - Service: "test", - Address: "::1", - Port: 80, - Tags: []string{ - "random.foo=bar", - label.TraefikWeight + "=42", - label.TraefikFrontendPassHostHeader + "=true", - label.TraefikProtocol + "=https", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "::1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:test.localhost", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: 
[]string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-ecTTsmX1vPktQQrl53WhNDy-HEg": { - URL: "https://127.0.0.1:80", - Weight: 42, - }, - "test-1-9tI2Ud3Vkl4T4B6bAIWV0vFjEIg": { - URL: "https://[::1]:80", - Weight: 42, - }, - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - MaxConn: &types.MaxConn{ - Amount: 1000, - ExtractorFunc: "client.ip", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - nodes := fakeLoadTraefikLabelsSlice(test.nodes, p.Prefix) - - actualConfig := p.buildConfiguration(nodes) - assert.NotNil(t, actualConfig) - assert.Equal(t, test.expectedBackends, actualConfig.Backends) - assert.Equal(t, test.expectedFrontends, actualConfig.Frontends) - }) - } -} - -func TestProviderBuildConfigurationCustomPrefix(t *testing.T) { - prefix := "traefik-test" - p := &Provider{ - Domain: "localhost", - Prefix: prefix, - ExposedByDefault: false, - FrontEndRule: "Host:{{.ServiceName}}.{{.Domain}}", - frontEndRuleTemplate: template.New("consul catalog frontend rule"), - } - - testCases := []struct { - desc string - nodes []catalogUpdate - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "Should build config which contains three frontends and one backend", - nodes: []catalogUpdate{ - { - Service: &serviceUpdate{ - ServiceName: "test", - Attributes: []string{ - "random.foo=bar", - prefix + ".frontend.rule=Host:A", - prefix + ".frontends.test1.rule=Host:B", - prefix + ".frontends.test2.rule=Host:C", - }, - }, - Nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "test", - Address: "127.0.0.1", - Port: 80, - Tags: []string{ - "random.foo=bar", - }, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-test": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test": { - Rule: "Host:A", - }, - }, - EntryPoints: []string{}, - }, - "frontend-test-test1": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test-test1": { - Rule: "Host:B", - }, - }, - EntryPoints: []string{}, - }, - "frontend-test-test2": { - Backend: "backend-test", - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-test-test2": { - Rule: "Host:C", - }, - }, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test": { - Servers: map[string]types.Server{ - "test-0-O0Tnh-SwzY69M6SurTKP3wNKkzI": { - URL: "http://127.0.0.1:80", - Weight: 1, - }, - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - nodes := fakeLoadTraefikLabelsSlice(test.nodes, p.Prefix) - - actualConfig := p.buildConfiguration(nodes) - assert.NotNil(t, actualConfig) - assert.Equal(t, test.expectedBackends, actualConfig.Backends) - assert.Equal(t, test.expectedFrontends, actualConfig.Frontends) - }) - } -} - -func TestGetTag(t *testing.T) { - testCases := []struct { - desc string - tags []string - key string - defaultValue string - expected string - }{ - { - 
desc: "Should return value of foo.bar key", - tags: []string{ - "foo.bar=random", - "traefik.backend.weight=42", - "management", - }, - key: "foo.bar", - defaultValue: "0", - expected: "random", - }, - { - desc: "Should return default value when nonexistent key", - tags: []string{ - "foo.bar.foo.bar=random", - "traefik.backend.weight=42", - "management", - }, - key: "foo.bar", - defaultValue: "0", - expected: "0", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getTag(test.key, test.tags, test.defaultValue) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestHasTag(t *testing.T) { - testCases := []struct { - desc string - name string - tags []string - expected bool - }{ - { - desc: "tag without value", - name: "foo", - tags: []string{"foo"}, - expected: true, - }, - { - desc: "tag with value", - name: "foo", - tags: []string{"foo=true"}, - expected: true, - }, - { - desc: "missing tag", - name: "foo", - tags: []string{"foobar=true"}, - expected: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := hasTag(test.name, test.tags) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetPrefixedName(t *testing.T) { - testCases := []struct { - desc string - name string - prefix string - expected string - }{ - { - desc: "empty name with prefix", - name: "", - prefix: "foo", - expected: "", - }, - { - desc: "empty name without prefix", - name: "", - prefix: "", - expected: "", - }, - { - desc: "with prefix", - name: "bar", - prefix: "foo", - expected: "foo.bar", - }, - { - desc: "without prefix", - name: "bar", - prefix: "", - expected: "bar", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{Prefix: test.prefix} - - actual := p.getPrefixedName(test.name) - assert.Equal(t, test.expected, actual) - }) - } - -} - -func TestProviderGetAttribute(t *testing.T) { - testCases := []struct { - desc string - tags []string - key string - defaultValue string - prefix string - expected string - }{ - { - desc: "Should return tag value 42", - prefix: "traefik", - tags: []string{ - "foo.bar=ramdom", - "traefik.backend.weight=42", - }, - key: "backend.weight", - defaultValue: "0", - expected: "42", - }, - { - desc: "Should return tag default value 0", - prefix: "traefik", - tags: []string{ - "foo.bar=ramdom", - "traefik.backend.wei=42", - }, - key: "backend.weight", - defaultValue: "0", - expected: "0", - }, - { - desc: "Should return tag value 42 when empty prefix", - tags: []string{ - "foo.bar=ramdom", - "backend.weight=42", - }, - key: "backend.weight", - defaultValue: "0", - expected: "42", - }, - { - desc: "Should return default value 0 when empty prefix", - tags: []string{ - "foo.bar=ramdom", - "backend.wei=42", - }, - key: "backend.weight", - defaultValue: "0", - expected: "0", - }, - { - desc: "Should return for.bar key value random when empty prefix", - tags: []string{ - "foo.bar=ramdom", - "backend.wei=42", - }, - key: "foo.bar", - defaultValue: "random", - expected: "ramdom", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - Domain: "localhost", - Prefix: test.prefix, - } - - actual := p.getAttribute(test.key, test.tags, test.defaultValue) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetFrontendRule(t 
*testing.T) { - testCases := []struct { - desc string - service serviceUpdate - domain string - expected string - }{ - { - desc: "Should return default host foo.localhost", - service: serviceUpdate{ - ServiceName: "foo", - Attributes: []string{}, - }, - domain: "localhost", - expected: "Host:foo.localhost", - }, - { - desc: "When no domain should return default host foo", - service: serviceUpdate{ - ServiceName: "foo", - Attributes: []string{}, - }, - domain: "", - expected: "Host:foo", - }, - { - desc: "Should return host *.example.com", - service: serviceUpdate{ - ServiceName: "foo", - Attributes: []string{ - "traefik.frontend.rule=Host:*.example.com", - }, - }, - domain: "localhost", - expected: "Host:*.example.com", - }, - { - desc: "Should return host foo.example.com", - service: serviceUpdate{ - ServiceName: "foo", - Attributes: []string{ - "traefik.frontend.rule=Host:{{.ServiceName}}.example.com", - }, - }, - domain: "localhost", - expected: "Host:foo.example.com", - }, - { - desc: "Should return path prefix /bar", - service: serviceUpdate{ - ServiceName: "foo", - Attributes: []string{ - "traefik.frontend.rule=PathPrefix:{{getTag \"contextPath\" .Attributes \"/\"}}", - "contextPath=/bar", - }, - }, - domain: "localhost", - expected: "PathPrefix:/bar", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - Domain: test.domain, - Prefix: "traefik", - FrontEndRule: "Host:{{.ServiceName}}.{{.Domain}}", - frontEndRuleTemplate: template.New("consul catalog frontend rule"), - } - p.setupFrontEndRuleTemplate() - - labels := tagsToNeutralLabels(test.service.Attributes, p.Prefix) - test.service.TraefikLabels = labels - - actual := p.getFrontendRule(test.service) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetBackendAddress(t *testing.T) { - testCases := []struct { - desc string - node *api.ServiceEntry - expected string - }{ - { - desc: "Should return the address of the service", - node: &api.ServiceEntry{ - Node: &api.Node{ - Address: "10.1.0.1", - }, - Service: &api.AgentService{ - Address: "10.2.0.1", - }, - }, - expected: "10.2.0.1", - }, - { - desc: "Should return the address of the node", - node: &api.ServiceEntry{ - Node: &api.Node{ - Address: "10.1.0.1", - }, - Service: &api.AgentService{ - Address: "", - }, - }, - expected: "10.1.0.1", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getBackendAddress(test.node) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetServerName(t *testing.T) { - testCases := []struct { - desc string - node *api.ServiceEntry - expected string - }{ - { - desc: "Should create backend name without tags", - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{}, - }, - }, - expected: "api-0-eUSiqD6uNvvh6zxsY-OeRi8ZbaE", - }, - { - desc: "Should create backend name with multiple tags", - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"traefik.weight=42", "traefik.enable=true"}, - }, - }, - expected: "api-1-eJ8MR2JxjXyZgs1bhurVa0-9OI8", - }, - { - desc: "Should create backend name with one tag", - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"a funny looking tag"}, - }, - }, - expected: "api-2-lMCDCsG7sh0SCXOHo4oBOQB-9D4", - }, - } - - 
for i, test := range testCases { - test := test - i := i - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getServerName(test.node, i) - assert.Equal(t, test.expected, actual) - }) - } -} - -func fakeLoadTraefikLabelsSlice(nodes []catalogUpdate, prefix string) []catalogUpdate { - var result []catalogUpdate - - for _, node := range nodes { - labels := tagsToNeutralLabels(node.Service.Attributes, prefix) - node.Service.TraefikLabels = labels - result = append(result, node) - } - - return result -} diff --git a/old/provider/consulcatalog/consul_catalog.go b/old/provider/consulcatalog/consul_catalog.go deleted file mode 100644 index 91961504b..000000000 --- a/old/provider/consulcatalog/consul_catalog.go +++ /dev/null @@ -1,618 +0,0 @@ -package consulcatalog - -import ( - "fmt" - "strconv" - "strings" - "sync" - "text/template" - "time" - - "github.com/BurntSushi/ty/fun" - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" - "github.com/hashicorp/consul/api" -) - -const ( - // DefaultWatchWaitTime is the duration to wait when polling consul - DefaultWatchWaitTime = 15 * time.Second -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the Consul catalog provider. -type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - Endpoint string `description:"Consul server endpoint"` - Domain string `description:"Default domain used"` - Stale bool `description:"Use stale consistency for catalog reads" export:"true"` - ExposedByDefault bool `description:"Expose Consul services by default" export:"true"` - Prefix string `description:"Prefix used for Consul catalog tags" export:"true"` - FrontEndRule string `description:"Frontend rule used for Consul services" export:"true"` - TLS *types.ClientTLS `description:"Enable TLS support" export:"true"` - client *api.Client - frontEndRuleTemplate *template.Template -} - -// Service represent a Consul service. 
-type Service struct { - Name string - Tags []string - Nodes []string - Addresses []string - Ports []int -} - -type serviceUpdate struct { - ServiceName string - ParentServiceName string - Attributes []string - TraefikLabels map[string]string -} - -type frontendSegment struct { - Name string - Labels map[string]string -} - -type catalogUpdate struct { - Service *serviceUpdate - Nodes []*api.ServiceEntry -} - -type nodeSorter []*api.ServiceEntry - -func (a nodeSorter) Len() int { - return len(a) -} - -func (a nodeSorter) Swap(i int, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a nodeSorter) Less(i int, j int) bool { - lEntry := a[i] - rEntry := a[j] - - ls := strings.ToLower(lEntry.Service.Service) - lr := strings.ToLower(rEntry.Service.Service) - - if ls != lr { - return ls < lr - } - if lEntry.Service.Address != rEntry.Service.Address { - return lEntry.Service.Address < rEntry.Service.Address - } - if lEntry.Node.Address != rEntry.Node.Address { - return lEntry.Node.Address < rEntry.Node.Address - } - return lEntry.Service.Port < rEntry.Service.Port -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.BaseProvider.Init(constraints) - if err != nil { - return err - } - - client, err := p.createClient() - if err != nil { - return err - } - - p.client = client - p.setupFrontEndRuleTemplate() - - return nil -} - -// Provide allows the consul catalog provider to provide configurations to traefik -// using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - pool.Go(func(stop chan bool) { - notify := func(err error, time time.Duration) { - log.Errorf("Consul connection error %+v, retrying in %s", err, time) - } - operation := func() error { - return p.watch(configurationChan, stop) - } - errRetry := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if errRetry != nil { - log.Errorf("Cannot connect to consul server %+v", errRetry) - } - }) - return nil -} - -func (p *Provider) createClient() (*api.Client, error) { - config := api.DefaultConfig() - config.Address = p.Endpoint - if p.TLS != nil { - tlsConfig, err := p.TLS.CreateTLSConfig() - if err != nil { - return nil, err - } - - config.Scheme = "https" - config.Transport.TLSClientConfig = tlsConfig - } - - client, err := api.NewClient(config) - if err != nil { - return nil, err - } - - return client, nil -} - -func (p *Provider) watch(configurationChan chan<- types.ConfigMessage, stop chan bool) error { - stopCh := make(chan struct{}) - watchCh := make(chan map[string][]string) - errorCh := make(chan error) - - var errorOnce sync.Once - notifyError := func(err error) { - errorOnce.Do(func() { - errorCh <- err - }) - } - - p.watchHealthState(stopCh, watchCh, notifyError) - p.watchCatalogServices(stopCh, watchCh, notifyError) - - defer close(stopCh) - defer close(watchCh) - - safe.Go(func() { - for index := range watchCh { - log.Debug("List of services changed") - nodes, err := p.getNodes(index) - if err != nil { - notifyError(err) - } - configuration := p.buildConfiguration(nodes) - configurationChan <- types.ConfigMessage{ - ProviderName: "consul_catalog", - Configuration: configuration, - } - } - }) - - for { - select { - case <-stop: - return nil - case err := <-errorCh: - return err - } - } -} - -func (p *Provider) watchCatalogServices(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) { - catalog := 
p.client.Catalog() - - safe.Go(func() { - // variable to hold previous state - var flashback map[string]Service - - options := &api.QueryOptions{WaitTime: DefaultWatchWaitTime, AllowStale: p.Stale} - - for { - select { - case <-stopCh: - return - default: - } - - data, meta, err := catalog.Services(options) - if err != nil { - log.Errorf("Failed to list services: %v", err) - notifyError(err) - return - } - - if options.WaitIndex == meta.LastIndex { - continue - } - - options.WaitIndex = meta.LastIndex - - if data != nil { - current := make(map[string]Service) - for key, value := range data { - nodes, _, err := catalog.Service(key, "", &api.QueryOptions{AllowStale: p.Stale}) - if err != nil { - log.Errorf("Failed to get detail of service %s: %v", key, err) - notifyError(err) - return - } - - nodesID := getServiceIds(nodes) - ports := getServicePorts(nodes) - addresses := getServiceAddresses(nodes) - - if service, ok := current[key]; ok { - service.Tags = value - service.Nodes = nodesID - service.Ports = ports - } else { - service := Service{ - Name: key, - Tags: value, - Nodes: nodesID, - Addresses: addresses, - Ports: ports, - } - current[key] = service - } - } - - // A critical note is that the return of a blocking request is no guarantee of a change. - // It is possible that there was an idempotent write that does not affect the result of the query. - // Thus it is required to do extra check for changes... - if hasChanged(current, flashback) { - watchCh <- data - flashback = current - } - } - } - }) -} - -func (p *Provider) watchHealthState(stopCh <-chan struct{}, watchCh chan<- map[string][]string, notifyError func(error)) { - health := p.client.Health() - catalog := p.client.Catalog() - - safe.Go(func() { - // variable to hold previous state - var flashback map[string][]string - var flashbackMaintenance []string - - options := &api.QueryOptions{WaitTime: DefaultWatchWaitTime, AllowStale: p.Stale} - - for { - select { - case <-stopCh: - return - default: - } - - // Listening to changes that leads to `passing` state or degrades from it. - healthyState, meta, err := health.State("any", options) - if err != nil { - log.WithError(err).Error("Failed to retrieve health checks") - notifyError(err) - return - } - - var current = make(map[string][]string) - var currentFailing = make(map[string]*api.HealthCheck) - var maintenance []string - if healthyState != nil { - for _, healthy := range healthyState { - key := fmt.Sprintf("%s-%s", healthy.Node, healthy.ServiceID) - _, failing := currentFailing[key] - if healthy.Status == "passing" && !failing { - current[key] = append(current[key], healthy.Node) - } else if strings.HasPrefix(healthy.CheckID, "_service_maintenance") || strings.HasPrefix(healthy.CheckID, "_node_maintenance") { - maintenance = append(maintenance, healthy.CheckID) - } else { - currentFailing[key] = healthy - if _, ok := current[key]; ok { - delete(current, key) - } - } - } - } - - // If LastIndex didn't change then it means `Get` returned - // because of the WaitTime and the key didn't changed. - if options.WaitIndex == meta.LastIndex { - continue - } - - options.WaitIndex = meta.LastIndex - - // The response should be unified with watchCatalogServices - data, _, err := catalog.Services(&api.QueryOptions{AllowStale: p.Stale}) - if err != nil { - log.Errorf("Failed to list services: %v", err) - notifyError(err) - return - } - - if data != nil { - // A critical note is that the return of a blocking request is no guarantee of a change. 
- // It is possible that there was an idempotent write that does not affect the result of the query. - // Thus it is required to do extra check for changes... - addedKeys, removedKeys, changedKeys := getChangedHealth(current, flashback) - - if len(addedKeys) > 0 || len(removedKeys) > 0 || len(changedKeys) > 0 { - log.WithField("DiscoveredServices", addedKeys). - WithField("MissingServices", removedKeys). - WithField("ChangedServices", changedKeys). - Debug("Health State change detected.") - - watchCh <- data - flashback = current - flashbackMaintenance = maintenance - } else { - addedKeysMaintenance, removedMaintenance := getChangedStringKeys(maintenance, flashbackMaintenance) - - if len(addedKeysMaintenance) > 0 || len(removedMaintenance) > 0 { - log.WithField("MaintenanceMode", maintenance).Debug("Maintenance change detected.") - watchCh <- data - flashback = current - flashbackMaintenance = maintenance - } - } - } - } - }) -} - -func (p *Provider) getNodes(index map[string][]string) ([]catalogUpdate, error) { - visited := make(map[string]bool) - - var nodes []catalogUpdate - for service := range index { - name := strings.ToLower(service) - if !strings.Contains(name, " ") && !visited[name] { - visited[name] = true - log.WithField("service", name).Debug("Fetching service") - healthy, err := p.healthyNodes(name) - if err != nil { - return nil, err - } - // healthy.Nodes can be empty if constraints do not match, without throwing error - if healthy.Service != nil && len(healthy.Nodes) > 0 { - nodes = append(nodes, healthy) - } - } - } - return nodes, nil -} - -func hasChanged(current map[string]Service, previous map[string]Service) bool { - if len(current) != len(previous) { - return true - } - addedServiceKeys, removedServiceKeys := getChangedServiceKeys(current, previous) - return len(removedServiceKeys) > 0 || len(addedServiceKeys) > 0 || hasServiceChanged(current, previous) -} - -func getChangedServiceKeys(current map[string]Service, previous map[string]Service) ([]string, []string) { - currKeySet := fun.Set(fun.Keys(current).([]string)).(map[string]bool) - prevKeySet := fun.Set(fun.Keys(previous).([]string)).(map[string]bool) - - addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool) - removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool) - - return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string) -} - -func hasServiceChanged(current map[string]Service, previous map[string]Service) bool { - for key, value := range current { - if prevValue, ok := previous[key]; ok { - addedNodesKeys, removedNodesKeys := getChangedStringKeys(value.Nodes, prevValue.Nodes) - if len(addedNodesKeys) > 0 || len(removedNodesKeys) > 0 { - return true - } - addedTagsKeys, removedTagsKeys := getChangedStringKeys(value.Tags, prevValue.Tags) - if len(addedTagsKeys) > 0 || len(removedTagsKeys) > 0 { - return true - } - addedAddressesKeys, removedAddressesKeys := getChangedStringKeys(value.Addresses, prevValue.Addresses) - if len(addedAddressesKeys) > 0 || len(removedAddressesKeys) > 0 { - return true - } - addedPortsKeys, removedPortsKeys := getChangedIntKeys(value.Ports, prevValue.Ports) - if len(addedPortsKeys) > 0 || len(removedPortsKeys) > 0 { - return true - } - } - } - return false -} - -func getChangedStringKeys(currState []string, prevState []string) ([]string, []string) { - currKeySet := fun.Set(currState).(map[string]bool) - prevKeySet := fun.Set(prevState).(map[string]bool) - - addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool) - 
removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool) - - return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string) -} - -func getChangedHealth(current map[string][]string, previous map[string][]string) ([]string, []string, []string) { - currKeySet := fun.Set(fun.Keys(current).([]string)).(map[string]bool) - prevKeySet := fun.Set(fun.Keys(previous).([]string)).(map[string]bool) - - addedKeys := fun.Difference(currKeySet, prevKeySet).(map[string]bool) - removedKeys := fun.Difference(prevKeySet, currKeySet).(map[string]bool) - - var changedKeys []string - - for key, value := range current { - if prevValue, ok := previous[key]; ok { - addedNodesKeys, removedNodesKeys := getChangedStringKeys(value, prevValue) - if len(addedNodesKeys) > 0 || len(removedNodesKeys) > 0 { - changedKeys = append(changedKeys, key) - } - } - } - - return fun.Keys(addedKeys).([]string), fun.Keys(removedKeys).([]string), changedKeys -} - -func getChangedIntKeys(currState []int, prevState []int) ([]int, []int) { - currKeySet := fun.Set(currState).(map[int]bool) - prevKeySet := fun.Set(prevState).(map[int]bool) - - addedKeys := fun.Difference(currKeySet, prevKeySet).(map[int]bool) - removedKeys := fun.Difference(prevKeySet, currKeySet).(map[int]bool) - - return fun.Keys(addedKeys).([]int), fun.Keys(removedKeys).([]int) -} - -func getServiceIds(services []*api.CatalogService) []string { - var serviceIds []string - for _, service := range services { - serviceIds = append(serviceIds, service.ID) - } - return serviceIds -} - -func getServicePorts(services []*api.CatalogService) []int { - var servicePorts []int - for _, service := range services { - servicePorts = append(servicePorts, service.ServicePort) - } - return servicePorts -} - -func getServiceAddresses(services []*api.CatalogService) []string { - var serviceAddresses []string - for _, service := range services { - serviceAddresses = append(serviceAddresses, service.ServiceAddress) - } - return serviceAddresses -} - -func (p *Provider) healthyNodes(service string) (catalogUpdate, error) { - health := p.client.Health() - data, _, err := health.Service(service, "", true, &api.QueryOptions{AllowStale: p.Stale}) - if err != nil { - log.WithError(err).Errorf("Failed to fetch details of %s", service) - return catalogUpdate{}, err - } - - nodes := fun.Filter(func(node *api.ServiceEntry) bool { - return p.nodeFilter(service, node) - }, data).([]*api.ServiceEntry) - - // Merge tags of nodes matching constraints, in a single slice. - tags := fun.Foldl(func(node *api.ServiceEntry, set []string) []string { - return fun.Keys(fun.Union( - fun.Set(set), - fun.Set(node.Service.Tags), - ).(map[string]bool)).([]string) - }, []string{}, nodes).([]string) - - labels := tagsToNeutralLabels(tags, p.Prefix) - - return catalogUpdate{ - Service: &serviceUpdate{ - ServiceName: service, - Attributes: tags, - TraefikLabels: labels, - }, - Nodes: nodes, - }, nil -} - -func (p *Provider) nodeFilter(service string, node *api.ServiceEntry) bool { - // Filter disabled application. - if !p.isServiceEnabled(node) { - log.Debugf("Filtering disabled Consul service %s", service) - return false - } - - // Filter by constraints. 
- constraintTags := p.getConstraintTags(node.Service.Tags) - ok, failingConstraint := p.MatchConstraints(constraintTags) - if !ok && failingConstraint != nil { - log.Debugf("Service %v pruned by '%v' constraint", service, failingConstraint.String()) - return false - } - return true -} - -func (p *Provider) isServiceEnabled(node *api.ServiceEntry) bool { - rawValue := getTag(p.getPrefixedName(label.SuffixEnable), node.Service.Tags, "") - - if len(rawValue) == 0 { - return p.ExposedByDefault - } - - value, err := strconv.ParseBool(rawValue) - if err != nil { - log.Errorf("Invalid value for %s: %s", label.SuffixEnable, rawValue) - return p.ExposedByDefault - } - return value -} - -func (p *Provider) getConstraintTags(tags []string) []string { - var values []string - - prefix := p.getPrefixedName("tags=") - for _, tag := range tags { - // We look for a Consul tag named 'traefik.tags' (unless different 'prefix' is configured) - if strings.HasPrefix(strings.ToLower(tag), prefix) { - // If 'traefik.tags=' tag is found, take the tag value and split by ',' adding the result to the list to be returned - splitedTags := label.SplitAndTrimString(tag[len(prefix):], ",") - values = append(values, splitedTags...) - } - } - - return values -} - -func (p *Provider) generateFrontends(service *serviceUpdate) []*serviceUpdate { - frontends := make([]*serviceUpdate, 0) - // to support .frontend.xxx - frontends = append(frontends, &serviceUpdate{ - ServiceName: service.ServiceName, - ParentServiceName: service.ServiceName, - Attributes: service.Attributes, - TraefikLabels: service.TraefikLabels, - }) - - // loop over children of .frontends.* - for _, frontend := range getSegments(label.Prefix+"frontends", label.Prefix, service.TraefikLabels) { - frontends = append(frontends, &serviceUpdate{ - ServiceName: service.ServiceName + "-" + frontend.Name, - ParentServiceName: service.ServiceName, - Attributes: service.Attributes, - TraefikLabels: frontend.Labels, - }) - } - - return frontends -} - -func getSegments(path string, prefix string, tree map[string]string) []*frontendSegment { - segments := make([]*frontendSegment, 0) - // find segment names - segmentNames := make(map[string]bool) - for key := range tree { - if strings.HasPrefix(key, path+".") { - segmentNames[strings.SplitN(strings.TrimPrefix(key, path+"."), ".", 2)[0]] = true - } - } - - // get labels for each segment found - for segment := range segmentNames { - labels := make(map[string]string) - for key, value := range tree { - if strings.HasPrefix(key, path+"."+segment) { - labels[prefix+"frontend"+strings.TrimPrefix(key, path+"."+segment)] = value - } - } - segments = append(segments, &frontendSegment{ - Name: segment, - Labels: labels, - }) - } - - return segments -} diff --git a/old/provider/consulcatalog/consul_catalog_test.go b/old/provider/consulcatalog/consul_catalog_test.go deleted file mode 100644 index 70ede6d67..000000000 --- a/old/provider/consulcatalog/consul_catalog_test.go +++ /dev/null @@ -1,862 +0,0 @@ -package consulcatalog - -import ( - "sort" - "testing" - - "github.com/BurntSushi/ty/fun" - "github.com/hashicorp/consul/api" - "github.com/stretchr/testify/assert" -) - -func TestNodeSorter(t *testing.T) { - testCases := []struct { - desc string - nodes []*api.ServiceEntry - expected []*api.ServiceEntry - }{ - { - desc: "Should sort nothing", - nodes: []*api.ServiceEntry{}, - expected: []*api.ServiceEntry{}, - }, - { - desc: "Should sort by node address", - nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "foo", - 
Address: "127.0.0.1", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - expected: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "foo", - Address: "127.0.0.1", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - }, - { - desc: "Should sort by service name", - nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "foo", - Address: "127.0.0.2", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - { - Service: &api.AgentService{ - Service: "bar", - Address: "127.0.0.2", - Port: 81, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - { - Service: &api.AgentService{ - Service: "foo", - Address: "127.0.0.1", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - { - Service: &api.AgentService{ - Service: "bar", - Address: "127.0.0.2", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - }, - expected: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "bar", - Address: "127.0.0.2", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - { - Service: &api.AgentService{ - Service: "bar", - Address: "127.0.0.2", - Port: 81, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - { - Service: &api.AgentService{ - Service: "foo", - Address: "127.0.0.1", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - { - Service: &api.AgentService{ - Service: "foo", - Address: "127.0.0.2", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - }, - }, - { - desc: "Should sort by node address", - nodes: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "foo", - Address: "", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - { - Service: &api.AgentService{ - Service: "foo", - Address: "", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - }, - expected: []*api.ServiceEntry{ - { - Service: &api.AgentService{ - Service: "foo", - Address: "", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.1", - }, - }, - { - Service: &api.AgentService{ - Service: "foo", - Address: "", - Port: 80, - }, - Node: &api.Node{ - Node: "localhost", - Address: "127.0.0.2", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - sort.Sort(nodeSorter(test.nodes)) - actual := test.nodes - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetChangedKeys(t *testing.T) { - type Input struct { - currState map[string]Service - prevState map[string]Service - } - - type Output struct { - addedKeys []string - removedKeys []string - } - - testCases := []struct { - desc string - input Input - output Output - }{ - { - desc: "Should add 0 services and removed 0", - input: Input{ - currState: map[string]Service{ - "foo-service": {Name: "v1"}, - "bar-service": {Name: "v1"}, - "baz-service": {Name: "v1"}, - "qux-service": {Name: "v1"}, - "quux-service": {Name: "v1"}, - "quuz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "grault-service": {Name: "v1"}, - "garply-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": 
{Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - prevState: map[string]Service{ - "foo-service": {Name: "v1"}, - "bar-service": {Name: "v1"}, - "baz-service": {Name: "v1"}, - "qux-service": {Name: "v1"}, - "quux-service": {Name: "v1"}, - "quuz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "grault-service": {Name: "v1"}, - "garply-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": {Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - }, - output: Output{ - addedKeys: []string{}, - removedKeys: []string{}, - }, - }, - { - desc: "Should add 3 services and removed 0", - input: Input{ - currState: map[string]Service{ - "foo-service": {Name: "v1"}, - "bar-service": {Name: "v1"}, - "baz-service": {Name: "v1"}, - "qux-service": {Name: "v1"}, - "quux-service": {Name: "v1"}, - "quuz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "grault-service": {Name: "v1"}, - "garply-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": {Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - prevState: map[string]Service{ - "foo-service": {Name: "v1"}, - "bar-service": {Name: "v1"}, - "baz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "grault-service": {Name: "v1"}, - "garply-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": {Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - }, - output: Output{ - addedKeys: []string{"qux-service", "quux-service", "quuz-service"}, - removedKeys: []string{}, - }, - }, - { - desc: "Should add 2 services and removed 2", - input: Input{ - currState: map[string]Service{ - "foo-service": {Name: "v1"}, - "qux-service": {Name: "v1"}, - "quux-service": {Name: "v1"}, - "quuz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "grault-service": {Name: "v1"}, - "garply-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": {Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - prevState: map[string]Service{ - "foo-service": {Name: "v1"}, - "bar-service": {Name: "v1"}, - "baz-service": {Name: "v1"}, - "qux-service": {Name: "v1"}, - "quux-service": {Name: "v1"}, - "quuz-service": {Name: "v1"}, - "corge-service": {Name: "v1"}, - "waldo-service": {Name: "v1"}, - "fred-service": {Name: "v1"}, - "plugh-service": {Name: "v1"}, - "xyzzy-service": {Name: "v1"}, - "thud-service": {Name: "v1"}, - }, - }, - output: Output{ - addedKeys: []string{"grault-service", "garply-service"}, - removedKeys: []string{"bar-service", "baz-service"}, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - addedKeys, removedKeys := getChangedServiceKeys(test.input.currState, test.input.prevState) - assert.Equal(t, fun.Set(test.output.addedKeys), fun.Set(addedKeys), "Added keys comparison results: got %q, want %q", addedKeys, test.output.addedKeys) - assert.Equal(t, fun.Set(test.output.removedKeys), fun.Set(removedKeys), "Removed keys comparison results: got %q, want %q", removedKeys, test.output.removedKeys) - }) - } -} - -func TestFilterEnabled(t *testing.T) { - testCases := []struct { - desc string - exposedByDefault bool - node *api.ServiceEntry - expected bool - }{ - { - desc: "exposed", - exposedByDefault: true, - node: &api.ServiceEntry{ - Service: 
&api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{""}, - }, - }, - expected: true, - }, - { - desc: "exposed and tolerated by valid label value", - exposedByDefault: true, - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"", "traefik.enable=true"}, - }, - }, - expected: true, - }, - { - desc: "exposed and tolerated by invalid label value", - exposedByDefault: true, - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"", "traefik.enable=bad"}, - }, - }, - expected: true, - }, - { - desc: "exposed but overridden by label", - exposedByDefault: true, - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"", "traefik.enable=false"}, - }, - }, - expected: false, - }, - { - desc: "non-exposed", - exposedByDefault: false, - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{""}, - }, - }, - expected: false, - }, - { - desc: "non-exposed but overridden by label", - exposedByDefault: false, - node: &api.ServiceEntry{ - Service: &api.AgentService{ - Service: "api", - Address: "10.0.0.1", - Port: 80, - Tags: []string{"", "traefik.enable=true"}, - }, - }, - expected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - provider := &Provider{ - Domain: "localhost", - Prefix: "traefik", - ExposedByDefault: test.exposedByDefault, - } - actual := provider.nodeFilter("test", test.node) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetChangedStringKeys(t *testing.T) { - testCases := []struct { - desc string - current []string - previous []string - expectedAdded []string - expectedRemoved []string - }{ - { - desc: "1 element added, 0 removed", - current: []string{"chou"}, - previous: []string{}, - expectedAdded: []string{"chou"}, - expectedRemoved: []string{}, - }, { - desc: "0 element added, 0 removed", - current: []string{"chou"}, - previous: []string{"chou"}, - expectedAdded: []string{}, - expectedRemoved: []string{}, - }, - { - desc: "0 element added, 1 removed", - current: []string{}, - previous: []string{"chou"}, - expectedAdded: []string{}, - expectedRemoved: []string{"chou"}, - }, - { - desc: "1 element added, 1 removed", - current: []string{"carotte"}, - previous: []string{"chou"}, - expectedAdded: []string{"carotte"}, - expectedRemoved: []string{"chou"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actualAdded, actualRemoved := getChangedStringKeys(test.current, test.previous) - assert.Equal(t, test.expectedAdded, actualAdded) - assert.Equal(t, test.expectedRemoved, actualRemoved) - }) - } -} - -func TestHasServiceChanged(t *testing.T) { - testCases := []struct { - desc string - current map[string]Service - previous map[string]Service - expected bool - }{ - { - desc: "Change detected due to change of nodes", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node2"}, - Tags: []string{}, - }, - }, - expected: true, - }, - { - desc: "No change missing current service", - current: make(map[string]Service), - previous: map[string]Service{ - "foo-service": 
{ - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - expected: false, - }, - { - desc: "No change on nodes", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - expected: false, - }, - { - desc: "No change on nodes and tags", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - expected: false, - }, - { - desc: "Change detected on tags", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - }, - }, - expected: true, - }, - { - desc: "Change detected on ports", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - Ports: []int{80}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - Ports: []int{81}, - }, - }, - expected: true, - }, - { - desc: "Change detected on ports", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - Ports: []int{80}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - Ports: []int{81, 82}, - }, - }, - expected: true, - }, - { - desc: "Change detected on addresses", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Addresses: []string{"127.0.0.1"}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Addresses: []string{"127.0.0.2"}, - }, - }, - expected: true, - }, - { - desc: "No Change detected", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - Ports: []int{80}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - Ports: []int{80}, - }, - }, - expected: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := hasServiceChanged(test.current, test.previous) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestHasChanged(t *testing.T) { - testCases := []struct { - desc string - current map[string]Service - previous map[string]Service - expected bool - }{ - { - desc: "Change detected due to change new service", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - previous: make(map[string]Service), - expected: true, - }, - { - desc: "Change detected due to change service removed", - current: make(map[string]Service), - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - expected: true, - }, - { - desc: "Change detected due to change of nodes", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - previous: 
map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node2"}, - Tags: []string{}, - }, - }, - expected: true, - }, - { - desc: "No change on nodes", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{}, - }, - }, - expected: false, - }, - { - desc: "No change on nodes and tags", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - expected: false, - }, - { - desc: "Change detected on tags", - current: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo=bar"}, - }, - }, - previous: map[string]Service{ - "foo-service": { - Name: "foo", - Nodes: []string{"node1"}, - Tags: []string{"foo"}, - }, - }, - expected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := hasChanged(test.current, test.previous) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetConstraintTags(t *testing.T) { - provider := &Provider{ - Domain: "localhost", - Prefix: "traefik", - } - - testCases := []struct { - desc string - tags []string - expected []string - }{ - { - desc: "nil tags", - }, - { - desc: "invalid tag", - tags: []string{"tags=foobar"}, - expected: nil, - }, - { - desc: "wrong tag", - tags: []string{"traefik_tags=foobar"}, - expected: nil, - }, - { - desc: "empty value", - tags: []string{"traefik.tags="}, - expected: nil, - }, - { - desc: "simple tag", - tags: []string{"traefik.tags=foobar "}, - expected: []string{"foobar"}, - }, - { - desc: "multiple values tag", - tags: []string{"traefik.tags=foobar, fiibir"}, - expected: []string{"foobar", "fiibir"}, - }, - { - desc: "multiple tags", - tags: []string{"traefik.tags=foobar", "traefik.tags=foobor"}, - expected: []string{"foobar", "foobor"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - constraints := provider.getConstraintTags(test.tags) - assert.EqualValues(t, test.expected, constraints) - }) - } -} diff --git a/old/provider/consulcatalog/convert_types.go b/old/provider/consulcatalog/convert_types.go deleted file mode 100644 index 5bce57cb8..000000000 --- a/old/provider/consulcatalog/convert_types.go +++ /dev/null @@ -1,29 +0,0 @@ -package consulcatalog - -import ( - "strings" - - "github.com/containous/traefik/old/provider/label" -) - -func tagsToNeutralLabels(tags []string, prefix string) map[string]string { - var labels map[string]string - - for _, tag := range tags { - if strings.HasPrefix(tag, prefix) { - - parts := strings.SplitN(tag, "=", 2) - if len(parts) == 2 { - if labels == nil { - labels = make(map[string]string) - } - - // replace custom prefix by the generic prefix - key := label.Prefix + strings.TrimPrefix(parts[0], prefix+".") - labels[key] = parts[1] - } - } - } - - return labels -} diff --git a/old/provider/consulcatalog/convert_types_test.go b/old/provider/consulcatalog/convert_types_test.go deleted file mode 100644 index b0c24b02d..000000000 --- a/old/provider/consulcatalog/convert_types_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package consulcatalog - -import ( - "testing" - - 
"github.com/stretchr/testify/assert" -) - -func TestTagsToNeutralLabels(t *testing.T) { - testCases := []struct { - desc string - tags []string - prefix string - expected map[string]string - }{ - { - desc: "without tags", - expected: nil, - }, - { - desc: "with a prefix", - prefix: "test", - tags: []string{ - "test.aaa=01", - "test.bbb=02", - "ccc=03", - "test.ddd=04=to", - }, - expected: map[string]string{ - "traefik.aaa": "01", - "traefik.bbb": "02", - "traefik.ddd": "04=to", - }, - }, - - { - desc: "with an empty prefix", - prefix: "", - tags: []string{ - "test.aaa=01", - "test.bbb=02", - "ccc=03", - "test.ddd=04=to", - }, - expected: map[string]string{ - "traefik.test.aaa": "01", - "traefik.test.bbb": "02", - "traefik.ccc": "03", - "traefik.test.ddd": "04=to", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - labels := tagsToNeutralLabels(test.tags, test.prefix) - - assert.Equal(t, test.expected, labels) - }) - } -} diff --git a/old/provider/dynamodb/dynamodb.go b/old/provider/dynamodb/dynamodb.go deleted file mode 100644 index 2047ae701..000000000 --- a/old/provider/dynamodb/dynamodb.go +++ /dev/null @@ -1,217 +0,0 @@ -package dynamodb - -import ( - "context" - "errors" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configuration for provider. -type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - AccessKeyID string `description:"The AWS credentials access key to use for making requests"` - RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"` - Region string `description:"The AWS region to use for requests" export:"true"` - SecretAccessKey string `description:"The AWS credentials secret key to use for making requests"` - TableName string `description:"The AWS dynamodb table that stores configuration for traefik" export:"true"` - Endpoint string `description:"The endpoint of a dynamodb. 
Used for testing with a local dynamodb"` -} - -type dynamoClient struct { - db dynamodbiface.DynamoDBAPI -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - return p.BaseProvider.Init(constraints) -} - -// createClient configures aws credentials and creates a dynamoClient -func (p *Provider) createClient() (*dynamoClient, error) { - log.Info("Creating Provider client...") - sess, err := session.NewSession() - if err != nil { - return nil, err - } - if p.Region == "" { - return nil, errors.New("no Region provided for Provider") - } - cfg := &aws.Config{ - Region: &p.Region, - Credentials: credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.StaticProvider{ - Value: credentials.Value{ - AccessKeyID: p.AccessKeyID, - SecretAccessKey: p.SecretAccessKey, - }, - }, - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{}, - defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()), - }), - } - - if p.Trace { - cfg.WithLogger(aws.LoggerFunc(func(args ...interface{}) { - log.Debug(args...) - })) - } - - if p.Endpoint != "" { - cfg.Endpoint = aws.String(p.Endpoint) - } - - return &dynamoClient{ - db: dynamodb.New(sess, cfg), - }, nil -} - -// scanTable scans the given table and returns slice of all items in the table -func (p *Provider) scanTable(client *dynamoClient) ([]map[string]*dynamodb.AttributeValue, error) { - log.Debugf("Scanning Provider table: %s ...", p.TableName) - params := &dynamodb.ScanInput{ - TableName: aws.String(p.TableName), - } - items := make([]map[string]*dynamodb.AttributeValue, 0) - err := client.db.ScanPages(params, - func(page *dynamodb.ScanOutput, lastPage bool) bool { - items = append(items, page.Items...) - return !lastPage - }) - if err != nil { - log.Errorf("Failed to scan Provider table %s", p.TableName) - return nil, err - } - log.Debugf("Successfully scanned Provider table %s", p.TableName) - return items, nil -} - -// buildConfiguration retrieves items from dynamodb and converts them into Backends and Frontends in a Configuration -func (p *Provider) buildConfiguration(client *dynamoClient) (*types.Configuration, error) { - items, err := p.scanTable(client) - if err != nil { - return nil, err - } - log.Debugf("Number of Items retrieved from Provider: %d", len(items)) - backends := make(map[string]*types.Backend) - frontends := make(map[string]*types.Frontend) - // unmarshal dynamoAttributes into Backends and Frontends - for i, item := range items { - log.Debugf("Provider Item: %d\n%v", i, item) - // verify the type of each item by checking to see if it has - // the corresponding type, backend or frontend map - if backend, exists := item["backend"]; exists { - log.Debug("Unmarshaling backend from Provider...") - tmpBackend := &types.Backend{} - err = dynamodbattribute.Unmarshal(backend, tmpBackend) - if err != nil { - log.Errorf(err.Error()) - } else { - backends[*item["name"].S] = tmpBackend - log.Debug("Backend from Provider unmarshalled successfully") - } - } else if frontend, exists := item["frontend"]; exists { - log.Debugf("Unmarshaling frontend from Provider...") - tmpFrontend := &types.Frontend{} - err = dynamodbattribute.Unmarshal(frontend, tmpFrontend) - if err != nil { - log.Errorf(err.Error()) - } else { - frontends[*item["name"].S] = tmpFrontend - log.Debug("Frontend from Provider unmarshalled successfully") - } - } else { - log.Warnf("Error in format of Provider Item: %v", item) - } - } - - return &types.Configuration{ - Backends: backends, - Frontends: 
frontends, - }, nil -} - -// Provide provides the configuration to traefik via the configuration channel -// if watch is enabled it polls dynamodb -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - handleCanceled := func(ctx context.Context, err error) error { - if ctx.Err() == context.Canceled || err == context.Canceled { - return nil - } - return err - } - - pool.Go(func(stop chan bool) { - ctx, cancel := context.WithCancel(context.Background()) - safe.Go(func() { - <-stop - cancel() - }) - - operation := func() error { - awsClient, err := p.createClient() - if err != nil { - return handleCanceled(ctx, err) - } - - configuration, err := p.buildConfiguration(awsClient) - if err != nil { - return handleCanceled(ctx, err) - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "dynamodb", - Configuration: configuration, - } - - if p.Watch { - reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - defer reload.Stop() - for { - log.Debug("Watching Provider...") - select { - case <-reload.C: - configuration, err := p.buildConfiguration(awsClient) - if err != nil { - return handleCanceled(ctx, err) - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "dynamodb", - Configuration: configuration, - } - case <-ctx.Done(): - return handleCanceled(ctx, ctx.Err()) - } - } - } - return nil - } - notify := func(err error, time time.Duration) { - log.Errorf("Provider error: %s time: %v", err.Error(), time) - } - - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Failed to connect to Provider. %s", err.Error()) - } - }) - return nil -} diff --git a/old/provider/dynamodb/dynamodb_test.go b/old/provider/dynamodb/dynamodb_test.go deleted file mode 100644 index 2c025e668..000000000 --- a/old/provider/dynamodb/dynamodb_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package dynamodb - -import ( - "errors" - "reflect" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" - "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface" - "github.com/containous/traefik/old/types" -) - -type mockDynamoDBClient struct { - dynamodbiface.DynamoDBAPI - testWithError bool -} - -var backend = &types.Backend{ - HealthCheck: &types.HealthCheck{ - Path: "/build", - }, - Servers: map[string]types.Server{ - "server1": { - URL: "http://test.traefik.io", - }, - }, -} - -var frontend = &types.Frontend{ - EntryPoints: []string{"http"}, - Backend: "test.traefik.io", - Routes: map[string]types.Route{ - "route1": { - Rule: "Host:test.traefik.io", - }, - }, -} - -// ScanPages simulates a call to ScanPages (see https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#DynamoDB.ScanPages) -// by running the fn function twice and returning an item each time. 
-func (m *mockDynamoDBClient) ScanPages(input *dynamodb.ScanInput, fn func(*dynamodb.ScanOutput, bool) bool) error { - if m.testWithError { - return errors.New("fake error") - } - attributeBackend, err := dynamodbattribute.Marshal(backend) - if err != nil { - return err - } - - attributeFrontend, err := dynamodbattribute.Marshal(frontend) - if err != nil { - return err - } - - fn(&dynamodb.ScanOutput{ - Items: []map[string]*dynamodb.AttributeValue{ - { - "id": &dynamodb.AttributeValue{ - S: aws.String("test.traefik.io_backend"), - }, - "name": &dynamodb.AttributeValue{ - S: aws.String("test.traefik.io"), - }, - "backend": attributeBackend, - }, - }, - }, false) - - fn(&dynamodb.ScanOutput{ - Items: []map[string]*dynamodb.AttributeValue{ - { - "id": &dynamodb.AttributeValue{ - S: aws.String("test.traefik.io_frontend"), - }, - "name": &dynamodb.AttributeValue{ - S: aws.String("test.traefik.io"), - }, - "frontend": attributeFrontend, - }, - }, - }, true) - return nil -} - -func TestBuildConfigurationSuccessful(t *testing.T) { - dbiface := &dynamoClient{ - db: &mockDynamoDBClient{ - testWithError: false, - }, - } - provider := Provider{} - loadedConfig, err := provider.buildConfiguration(dbiface) - if err != nil { - t.Fatal(err) - } - expectedConfig := &types.Configuration{ - Backends: map[string]*types.Backend{ - "test.traefik.io": backend, - }, - Frontends: map[string]*types.Frontend{ - "test.traefik.io": frontend, - }, - } - if !reflect.DeepEqual(loadedConfig, expectedConfig) { - t.Fatalf("Configurations did not match: %v %v", loadedConfig, expectedConfig) - } -} - -func TestBuildConfigurationFailure(t *testing.T) { - dbiface := &dynamoClient{ - db: &mockDynamoDBClient{ - testWithError: true, - }, - } - provider := Provider{} - _, err := provider.buildConfiguration(dbiface) - if err == nil { - t.Fatal("Expected error") - } -} - -func TestCreateClientSuccessful(t *testing.T) { - provider := Provider{ - Region: "us-east-1", - } - _, err := provider.createClient() - if err != nil { - t.Fatal(err) - } -} - -func TestCreateClientFailure(t *testing.T) { - provider := Provider{} - _, err := provider.createClient() - if err == nil { - t.Fatal("Expected error") - } -} diff --git a/old/provider/ecs/builder_test.go b/old/provider/ecs/builder_test.go deleted file mode 100644 index 8f1e313d1..000000000 --- a/old/provider/ecs/builder_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package ecs - -import ( - "github.com/aws/aws-sdk-go/service/ecs" -) - -func instance(ops ...func(*ecsInstance)) ecsInstance { - e := &ecsInstance{ - containerDefinition: &ecs.ContainerDefinition{}, - } - - for _, op := range ops { - op(e) - } - - return *e -} - -func name(name string) func(*ecsInstance) { - return func(e *ecsInstance) { - e.Name = name - } -} - -func ID(ID string) func(*ecsInstance) { - return func(e *ecsInstance) { - e.ID = ID - } -} - -func iMachine(opts ...func(*machine)) func(*ecsInstance) { - return func(e *ecsInstance) { - e.machine = &machine{} - - for _, opt := range opts { - opt(e.machine) - } - } -} - -func mState(state string) func(*machine) { - return func(m *machine) { - m.state = state - } -} - -func mPrivateIP(ip string) func(*machine) { - return func(m *machine) { - m.privateIP = ip - } -} - -func mPorts(opts ...func(*portMapping)) func(*machine) { - return func(m *machine) { - for _, opt := range opts { - p := &portMapping{} - opt(p) - m.ports = append(m.ports, *p) - } - } -} - -func mPort(containerPort int32, hostPort int32) func(*portMapping) { - return func(pm *portMapping) { - pm.containerPort = 
int64(containerPort) - pm.hostPort = int64(hostPort) - } -} - -func labels(labels map[string]string) func(*ecsInstance) { - return func(c *ecsInstance) { - c.TraefikLabels = labels - } -} - -func dockerLabels(labels map[string]*string) func(*ecsInstance) { - return func(c *ecsInstance) { - c.containerDefinition.DockerLabels = labels - } -} diff --git a/old/provider/ecs/cluster.go b/old/provider/ecs/cluster.go deleted file mode 100644 index c9e948d29..000000000 --- a/old/provider/ecs/cluster.go +++ /dev/null @@ -1,32 +0,0 @@ -package ecs - -import ( - "fmt" - "strings" -) - -// Clusters holds ecs clusters name -type Clusters []string - -// Set adds strings elem into the the parser -// it splits str on , and ; -func (c *Clusters) Set(str string) error { - fargs := func(c rune) bool { - return c == ',' || c == ';' - } - // get function - slice := strings.FieldsFunc(str, fargs) - *c = append(*c, slice...) - return nil -} - -// Get Clusters -func (c *Clusters) Get() interface{} { return *c } - -// String return slice in a string -func (c *Clusters) String() string { return fmt.Sprintf("%v", *c) } - -// SetValue sets Clusters into the parser -func (c *Clusters) SetValue(val interface{}) { - *c = val.(Clusters) -} diff --git a/old/provider/ecs/cluster_test.go b/old/provider/ecs/cluster_test.go deleted file mode 100644 index 7c438b36b..000000000 --- a/old/provider/ecs/cluster_test.go +++ /dev/null @@ -1,145 +0,0 @@ -package ecs - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestClustersSet(t *testing.T) { - tests := []struct { - desc string - value string - expected Clusters - }{ - { - desc: "One value should return Clusters of size 1", - value: "cluster", - expected: Clusters{"cluster"}, - }, - { - desc: "Two values separated by comma should return Clusters of size 2", - value: "cluster1,cluster2", - expected: Clusters{"cluster1", "cluster2"}, - }, - { - desc: "Two values separated by semicolon should return Clusters of size 2", - value: "cluster1;cluster2", - expected: Clusters{"cluster1", "cluster2"}, - }, - { - desc: "Three values separated by comma and semicolon should return Clusters of size 3", - value: "cluster1,cluster2;cluster3", - expected: Clusters{"cluster1", "cluster2", "cluster3"}, - }, - } - - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - var clusters Clusters - err := clusters.Set(test.value) - assert.Nil(t, err) - assert.Equal(t, test.expected, clusters) - }) - } -} - -func TestClustersGet(t *testing.T) { - tests := []struct { - desc string - clusters Clusters - expected Clusters - }{ - { - desc: "Should return 1 cluster", - clusters: Clusters{"cluster"}, - expected: Clusters{"cluster"}, - }, - { - desc: "Should return 2 clusters", - clusters: Clusters{"cluster1", "cluster2"}, - expected: Clusters{"cluster1", "cluster2"}, - }, - { - desc: "Should return 3 clusters", - clusters: Clusters{"cluster1", "cluster2", "cluster3"}, - expected: Clusters{"cluster1", "cluster2", "cluster3"}, - }, - } - - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - actual := test.clusters.Get() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestClustersString(t *testing.T) { - tests := []struct { - desc string - clusters Clusters - expected string - }{ - { - desc: "Should return 1 cluster", - clusters: Clusters{"cluster"}, - expected: "[cluster]", - }, - { - desc: "Should return 2 clusters", - clusters: Clusters{"cluster1", "cluster2"}, - expected: 
"[cluster1 cluster2]", - }, - { - desc: "Should return 3 clusters", - clusters: Clusters{"cluster1", "cluster2", "cluster3"}, - expected: "[cluster1 cluster2 cluster3]", - }, - } - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - actual := test.clusters.String() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestClustersSetValue(t *testing.T) { - tests := []struct { - desc string - clusters Clusters - expected Clusters - }{ - { - desc: "Should return Clusters of size 1", - clusters: Clusters{"cluster"}, - expected: Clusters{"cluster"}, - }, - { - desc: "Should return Clusters of size 2", - clusters: Clusters{"cluster1", "cluster2"}, - expected: Clusters{"cluster1", "cluster2"}, - }, - { - desc: "Should return Clusters of size 3", - clusters: Clusters{"cluster1", "cluster2", "cluster3"}, - expected: Clusters{"cluster1", "cluster2", "cluster3"}, - }, - } - for _, test := range tests { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - var slice Clusters - slice.SetValue(test.clusters) - assert.Equal(t, test.expected, slice) - }) - } -} diff --git a/old/provider/ecs/config.go b/old/provider/ecs/config.go deleted file mode 100644 index 535ae16d2..000000000 --- a/old/provider/ecs/config.go +++ /dev/null @@ -1,252 +0,0 @@ -package ecs - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "net" - "strconv" - "strings" - "text/template" - - "github.com/BurntSushi/ty/fun" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" -) - -// buildConfiguration fills the config template with the given instances -func (p *Provider) buildConfiguration(instances []ecsInstance) (*types.Configuration, error) { - var ecsFuncMap = template.FuncMap{ - // Backend functions - "getHost": getHost, - "getPort": getPort, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getResponseForwarding": label.GetResponseForwarding, - - "getServers": getServers, - - // Frontend functions - "filterFrontends": filterFrontends, - "getFrontendRule": p.getFrontendRule, - "getFrontendName": p.getFrontendName, - "getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert), - "getPassTLSClientCert": label.GetTLSClientCert, - "getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority), - "getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated - "getAuth": label.GetAuth, - "getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints), - "getRedirect": label.GetRedirect, - "getErrorPages": label.GetErrorPages, - "getRateLimit": label.GetRateLimit, - "getHeaders": label.GetHeaders, - "getWhiteList": label.GetWhiteList, - } - - services := make(map[string][]ecsInstance) - for _, instance := range instances { - segmentProperties := label.ExtractTraefikLabels(instance.TraefikLabels) - - for segmentName, labels := range segmentProperties { - instance.SegmentLabels = labels - instance.SegmentName = segmentName - - backendName := getBackendName(instance) - if p.filterInstance(instance) { - if serviceInstances, ok := 
services[backendName]; ok { - services[backendName] = append(serviceInstances, instance) - } else { - services[backendName] = []ecsInstance{instance} - } - } - } - } - - return p.GetConfiguration("templates/ecs.tmpl", ecsFuncMap, struct { - Services map[string][]ecsInstance - }{ - Services: services, - }) -} - -func (p *Provider) filterInstance(i ecsInstance) bool { - if i.machine == nil { - log.Debug("Filtering ecs instance with nil machine") - return false - } - - if labelPort := label.GetStringValue(i.TraefikLabels, label.TraefikPort, ""); len(i.machine.ports) == 0 && labelPort == "" { - log.Debugf("Filtering ecs instance without port %s (%s)", i.Name, i.ID) - return false - } - - if strings.ToLower(i.machine.state) != ec2.InstanceStateNameRunning { - log.Debugf("Filtering ecs instance with an incorrect state %s (%s) (state = %s)", i.Name, i.ID, i.machine.state) - return false - } - - if len(i.machine.privateIP) == 0 { - log.Debugf("Filtering ecs instance without an ip address %s (%s)", i.Name, i.ID) - return false - } - - if !isEnabled(i, p.ExposedByDefault) { - log.Debugf("Filtering disabled ecs instance %s (%s)", i.Name, i.ID) - return false - } - - constraintTags := label.GetSliceStringValue(i.TraefikLabels, label.TraefikTags) - if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok { - if failingConstraint != nil { - log.Debugf("Filtering ecs instance pruned by constraint %s (%s) (constraint = %q)", i.Name, i.ID, failingConstraint.String()) - } - return false - } - - return true -} - -func getBackendName(i ecsInstance) string { - if len(i.SegmentName) > 0 { - return getSegmentBackendName(i) - } - - return getDefaultBackendName(i) -} - -func getSegmentBackendName(i ecsInstance) string { - if value := label.GetStringValue(i.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 { - return provider.Normalize(i.Name + "-" + value) - } - - return provider.Normalize(i.Name + "-" + i.SegmentName) -} - -func getDefaultBackendName(i ecsInstance) string { - if value := label.GetStringValue(i.SegmentLabels, label.TraefikBackend, ""); len(value) != 0 { - return provider.Normalize(value) - } - - return provider.Normalize(i.Name) -} - -func (p *Provider) getFrontendRule(i ecsInstance) string { - if value := label.GetStringValue(i.SegmentLabels, label.TraefikFrontendRule, ""); len(value) != 0 { - return value - } - - domain := label.GetStringValue(i.SegmentLabels, label.TraefikDomain, p.Domain) - if len(domain) > 0 { - domain = "." 
+ domain - } - - defaultRule := "Host:" + strings.ToLower(strings.Replace(i.Name, "_", "-", -1)) + domain - - return label.GetStringValue(i.TraefikLabels, label.TraefikFrontendRule, defaultRule) -} - -func (p *Provider) getFrontendName(instance ecsInstance) string { - name := getBackendName(instance) - if len(instance.SegmentName) > 0 { - name = instance.SegmentName + "-" + name - } - - return provider.Normalize(name) -} - -func getHost(i ecsInstance) string { - return i.machine.privateIP -} - -func getPort(i ecsInstance) string { - value := label.GetStringValue(i.SegmentLabels, label.TraefikPort, "") - - if len(value) == 0 { - value = label.GetStringValue(i.TraefikLabels, label.TraefikPort, "") - } - - if len(value) > 0 { - port, err := strconv.ParseInt(value, 10, 64) - if err == nil { - for _, mapping := range i.machine.ports { - if port == mapping.hostPort || port == mapping.containerPort { - return strconv.FormatInt(mapping.hostPort, 10) - } - } - return value - } - } - return strconv.FormatInt(i.machine.ports[0].hostPort, 10) -} - -func filterFrontends(instances []ecsInstance) []ecsInstance { - byName := make(map[string]struct{}) - - return fun.Filter(func(i ecsInstance) bool { - backendName := getBackendName(i) - if len(i.SegmentName) > 0 { - backendName = backendName + "-" + i.SegmentName - } - - _, found := byName[backendName] - if !found { - byName[backendName] = struct{}{} - } - return !found - }, instances).([]ecsInstance) -} - -func getServers(instances []ecsInstance) map[string]types.Server { - var servers map[string]types.Server - - for _, instance := range instances { - if servers == nil { - servers = make(map[string]types.Server) - } - - protocol := label.GetStringValue(instance.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol) - host := getHost(instance) - port := getPort(instance) - - serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port)) - serverName := getServerName(instance, serverURL) - - if _, exist := servers[serverName]; exist { - log.Debugf("Skipping server %q with the same URL.", serverName) - continue - } - - servers[serverName] = types.Server{ - URL: serverURL, - Weight: label.GetIntValue(instance.SegmentLabels, label.TraefikWeight, label.DefaultWeight), - } - } - - return servers -} - -func isEnabled(i ecsInstance, exposedByDefault bool) bool { - return label.GetBoolValue(i.TraefikLabels, label.TraefikEnable, exposedByDefault) -} - -func getServerName(instance ecsInstance, url string) string { - hash := md5.New() - _, err := hash.Write([]byte(url)) - if err != nil { - // Impossible case - log.Errorf("Fail to hash server URL %q", url) - } - - if len(instance.SegmentName) > 0 { - return provider.Normalize(fmt.Sprintf("server-%s-%s-%s", instance.Name, instance.ID, hex.EncodeToString(hash.Sum(nil)))) - } - - return provider.Normalize(fmt.Sprintf("server-%s-%s", instance.Name, instance.ID)) -} diff --git a/old/provider/ecs/config_segment_test.go b/old/provider/ecs/config_segment_test.go deleted file mode 100644 index efc672b37..000000000 --- a/old/provider/ecs/config_segment_test.go +++ /dev/null @@ -1,901 +0,0 @@ -// +build ignore - -package ecs - -import ( - "testing" - "time" - - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestSegmentBuildConfiguration(t *testing.T) { - testCases := []struct { - desc string - 
instanceInfo []ecsInstance - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "when no container", - instanceInfo: []ecsInstance{}, - expectedFrontends: map[string]*types.Frontend{}, - expectedBackends: map[string]*types.Backend{}, - }, - { - desc: "simple configuration", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.frontend.entryPoints": "http,https", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - PassHostHeader: true, - EntryPoints: []string{"http", "https"}, - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-863563a2e23c95502862016417ee95ea": { - URL: "http://127.0.0.1:2503", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "auth basic", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.frontend.entryPoints": "http,https", - label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - PassHostHeader: true, - EntryPoints: []string{"http", "https"}, - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-863563a2e23c95502862016417ee95ea": { - URL: "http://127.0.0.1:2503", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "auth basic backward compatibility", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.frontend.entryPoints": "http,https", - label.Prefix + "sauternes." 
+ label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - PassHostHeader: true, - EntryPoints: []string{"http", "https"}, - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-863563a2e23c95502862016417ee95ea": { - URL: "http://127.0.0.1:2503", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "auth digest", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.frontend.entryPoints": "http,https", - label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - PassHostHeader: true, - EntryPoints: []string{"http", "https"}, - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-863563a2e23c95502862016417ee95ea": { - URL: "http://127.0.0.1:2503", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "auth forward", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.frontend.entryPoints": "http,https", - label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key", - label.Prefix + "sauternes." 
+ label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAuthResponseHeaders: "X-Auth-User,X-Auth-Token", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - PassHostHeader: true, - EntryPoints: []string{"http", "https"}, - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - Cert: "server.crt", - Key: "server.key", - InsecureSkipVerify: true, - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-863563a2e23c95502862016417ee95ea": { - URL: "http://127.0.0.1:2503", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "when all labels are set", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("foo"), - labels(map[string]string{ - label.Prefix + "sauternes." + label.SuffixPort: "666", - label.Prefix + "sauternes." + label.SuffixProtocol: "https", - label.Prefix + "sauternes." + label.SuffixWeight: "12", - - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTrustForwardHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User", - - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertPem: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotBefore: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotAfter: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSans: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerCommonName: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerCountry: "true", - label.Prefix + "sauternes." 
+ label.SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerLocality: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerOrganization: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerProvince: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCommonName: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCountry: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectLocality: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectOrganization: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectProvince: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber: "true", - - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendEntryPoints: "http,https", - label.Prefix + "sauternes." + label.SuffixFrontendPassHostHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSCert: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPriority: "666", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectEntryPoint: "https", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectRegex: "nope", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectReplacement: "nope", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectPermanent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendWhiteListSourceRange: "10.10.10.10", - label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11", - label.Prefix + "sauternes." + label.SuffixFrontendWhiteListIPStrategyDepth: "5", - - label.Prefix + "sauternes." + label.SuffixFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersAllowedHosts: "foo,bar,bor", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersHostsProxyHeaders: "foo,bar,bor", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLHost: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomFrameOptionsValue: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentSecurityPolicy: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersPublicKey: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersReferrerPolicy: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomBrowserXSSValue: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSSeconds: "666", - label.Prefix + "sauternes." 
+ label.SuffixFrontendHeadersSSLForceHost: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLRedirect: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLTemporaryRedirect: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSIncludeSubdomains: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSPreload: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersForceSTSHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersFrameDeny: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentTypeNosniff: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersBrowserXSSFilter: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersIsDevelopment: "true", - - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: "404", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: "foo_query", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: "500,600", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: "bar_query", - - label.Prefix + "sauternes." + label.SuffixFrontendRateLimitExtractorFunc: "client.ip", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: "6", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: "12", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: "18", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: "3", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: "6", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." 
+ label.SuffixRateLimitBurst: "9", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 666), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-foo-sauternes": { - Backend: "backend-foo-sauternes", - EntryPoints: []string{ - "http", - "https", - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{"10.10.10.10"}, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"}, - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "backend-foobar", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "backend-foobar", - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - - Routes: map[string]types.Route{ - "route-frontend-sauternes-foo-sauternes": { - Rule: "Host:foo.ecs.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foo-sauternes": { - Servers: map[string]types.Server{ - "server-foo-123456789abc-7f6444e0dff3330c8b0ad2bbbd383b0f": { - URL: "https://127.0.0.1:666", - Weight: 12, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "several containers", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - 
name("test1"), - labels(map[string]string{ - "traefik.sauternes.port": "2503", - "traefik.sauternes.protocol": "https", - "traefik.sauternes.weight": "80", - "traefik.sauternes.backend": "foobar", - "traefik.sauternes.frontend.passHostHeader": "false", - "traefik.sauternes.frontend.rule": "Path:/mypath", - "traefik.sauternes.frontend.priority": "5000", - "traefik.sauternes.frontend.entryPoints": "http,https,ws", - "traefik.sauternes.frontend.auth.basic": "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - "traefik.sauternes.frontend.redirect.entryPoint": "https", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - instance( - ID("abc987654321"), - name("test2"), - labels(map[string]string{ - "traefik.anothersauternes.port": "8079", - "traefik.anothersauternes.weight": "33", - "traefik.anothersauternes.frontend.rule": "Path:/anotherpath", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.2"), - mPorts( - mPort(80, 8079), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: false, - Priority: 5000, - EntryPoints: []string{"http", "https", "ws"}, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-sauternes-test1-foobar": { - Rule: "Path:/mypath", - }, - }, - }, - "frontend-anothersauternes-test2-anothersauternes": { - Backend: "backend-test2-anothersauternes", - PassHostHeader: true, - EntryPoints: []string{}, - Routes: map[string]types.Route{ - "route-frontend-anothersauternes-test2-anothersauternes": { - Rule: "Path:/anotherpath", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test1-foobar": { - Servers: map[string]types.Server{ - "server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": { - URL: "https://127.0.0.1:2503", - Weight: 80, - }, - }, - CircuitBreaker: nil, - }, - "backend-test2-anothersauternes": { - Servers: map[string]types.Server{ - "server-test2-abc987654321-045e3e4aa5a744a325c099b803700a93": { - URL: "http://127.0.0.2:8079", - Weight: 33, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "several segments with the same backend name and same port", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("test1"), - labels(map[string]string{ - "traefik.port": "2503", - "traefik.protocol": "https", - "traefik.weight": "80", - "traefik.frontend.entryPoints": "http,https", - "traefik.frontend.redirect.entryPoint": "https", - - "traefik.sauternes.backend": "foobar", - "traefik.sauternes.frontend.rule": "Path:/sauternes", - "traefik.sauternes.frontend.priority": "5000", - - "traefik.arbois.backend": "foobar", - "traefik.arbois.frontend.rule": "Path:/arbois", - "traefik.arbois.frontend.priority": "3000", - }), - - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 5000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - 
"route-frontend-sauternes-test1-foobar": { - Rule: "Path:/sauternes", - }, - }, - }, - "frontend-arbois-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 3000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-arbois-test1-foobar": { - Rule: "Path:/arbois", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test1-foobar": { - Servers: map[string]types.Server{ - "server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": { - URL: "https://127.0.0.1:2503", - Weight: 80, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "several segments with the same backend name and different port (wrong behavior)", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("test1"), - labels(map[string]string{ - "traefik.protocol": "https", - "traefik.frontend.entryPoints": "http,https", - "traefik.frontend.redirect.entryPoint": "https", - - "traefik.sauternes.port": "2503", - "traefik.sauternes.weight": "80", - "traefik.sauternes.backend": "foobar", - "traefik.sauternes.frontend.rule": "Path:/sauternes", - "traefik.sauternes.frontend.priority": "5000", - - "traefik.arbois.port": "2504", - "traefik.arbois.weight": "90", - "traefik.arbois.backend": "foobar", - "traefik.arbois.frontend.rule": "Path:/arbois", - "traefik.arbois.frontend.priority": "3000", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - mPort(80, 2503), - mPort(80, 2504), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 5000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-sauternes-test1-foobar": { - Rule: "Path:/sauternes", - }, - }, - }, - "frontend-arbois-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 3000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-arbois-test1-foobar": { - Rule: "Path:/arbois", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test1-foobar": { - Servers: map[string]types.Server{ - "server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": { - URL: "https://127.0.0.1:2503", - Weight: 80, - }, - "server-test1-123456789abc-315a41140f1bd825b066e39686c18482": { - URL: "https://127.0.0.1:2504", - Weight: 90, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "several segments with the same backend name and different port binding", - instanceInfo: []ecsInstance{ - instance( - ID("123456789abc"), - name("test1"), - labels(map[string]string{ - "traefik.protocol": "https", - "traefik.frontend.entryPoints": "http,https", - "traefik.frontend.redirect.entryPoint": "https", - - "traefik.sauternes.port": "2503", - "traefik.sauternes.weight": "80", - "traefik.sauternes.backend": "foobar", - "traefik.sauternes.frontend.rule": "Path:/sauternes", - "traefik.sauternes.frontend.priority": "5000", - - "traefik.arbois.port": "8080", - "traefik.arbois.weight": "90", - "traefik.arbois.backend": "foobar", - "traefik.arbois.frontend.rule": "Path:/arbois", - "traefik.arbois.frontend.priority": "3000", - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("127.0.0.1"), - mPorts( - 
mPort(80, 2503), - mPort(8080, 2504), - ), - ), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 5000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-sauternes-test1-foobar": { - Rule: "Path:/sauternes", - }, - }, - }, - "frontend-arbois-test1-foobar": { - Backend: "backend-test1-foobar", - PassHostHeader: true, - Priority: 3000, - EntryPoints: []string{"http", "https"}, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-arbois-test1-foobar": { - Rule: "Path:/arbois", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test1-foobar": { - Servers: map[string]types.Server{ - "server-test1-123456789abc-79533a101142718f0fdf84c42593c41e": { - URL: "https://127.0.0.1:2503", - Weight: 80, - }, - "server-test1-123456789abc-315a41140f1bd825b066e39686c18482": { - URL: "https://127.0.0.1:2504", - Weight: 90, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - } - - provider := &Provider{ - Domain: "ecs.localhost", - ExposedByDefault: true, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actualConfig, err := provider.buildConfiguration(test.instanceInfo) - - assert.NoError(t, err) - require.NotNil(t, actualConfig, "actualConfig") - - assert.EqualValues(t, test.expectedBackends, actualConfig.Backends) - assert.EqualValues(t, test.expectedFrontends, actualConfig.Frontends) - }) - } -} diff --git a/old/provider/ecs/config_test.go b/old/provider/ecs/config_test.go deleted file mode 100644 index 60962adfd..000000000 --- a/old/provider/ecs/config_test.go +++ /dev/null @@ -1,1288 +0,0 @@ -// +build ignore - -package ecs - -import ( - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" -) - -func TestBuildConfiguration(t *testing.T) { - testCases := []struct { - desc string - instances []ecsInstance - expected *types.Configuration - err error - }{ - { - desc: "config parsed successfully", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{}), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "config parsed successfully with health check labels", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{ - label.TraefikBackendHealthCheckPath: aws.String("/health"), - label.TraefikBackendHealthCheckInterval: aws.String("6s"), - label.TraefikBackendHealthCheckTimeout: aws.String("3s"), - }), - iMachine( - 
mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - HealthCheck: &types.HealthCheck{ - Path: "/health", - Interval: "6s", - Timeout: "3s", - }, - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "config parsed successfully with basic auth labels", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{ - label.TraefikFrontendAuthBasicUsers: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendAuthBasicUsersFile: aws.String(".htpasswd"), - label.TraefikFrontendAuthBasicRemoveHeader: aws.String("true"), - label.TraefikFrontendAuthHeaderField: aws.String("X-WebAuth-User"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "config parsed successfully with basic auth (backward compatibility) labels", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{ - label.TraefikFrontendAuthBasic: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "config parsed successfully with digest auth labels", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{ - label.TraefikFrontendAuthDigestRemoveHeader: aws.String("true"), - label.TraefikFrontendAuthDigestUsers: 
aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendAuthDigestUsersFile: aws.String(".htpasswd"), - label.TraefikFrontendAuthHeaderField: aws.String("X-WebAuth-User"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "config parsed successfully with forward auth labels", - instances: []ecsInstance{ - instance( - name("instance"), - ID("1"), - dockerLabels(map[string]*string{ - label.TraefikFrontendAuthForwardAddress: aws.String("auth.server"), - label.TraefikFrontendAuthForwardTrustForwardHeader: aws.String("true"), - label.TraefikFrontendAuthForwardTLSCa: aws.String("ca.crt"), - label.TraefikFrontendAuthForwardTLSCaOptional: aws.String("true"), - label.TraefikFrontendAuthForwardTLSCert: aws.String("server.crt"), - label.TraefikFrontendAuthForwardTLSKey: aws.String("server.key"), - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify: aws.String("true"), - label.TraefikFrontendAuthHeaderField: aws.String("X-WebAuth-User"), - label.TraefikFrontendAuthForwardAuthResponseHeaders: aws.String("X-Auth-User,X-Auth-Token"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-instance": { - Servers: map[string]types.Server{ - "server-instance-1": { - URL: "http://10.0.0.1:1337", - Weight: label.DefaultWeight, - }}, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-instance": { - EntryPoints: []string{}, - Backend: "backend-instance", - Routes: map[string]types.Route{ - "route-frontend-instance": { - Rule: "Host:instance", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - PassHostHeader: true, - }, - }, - }, - }, - { - desc: "when all labels are set", - instances: []ecsInstance{ - instance( - name("testing-instance"), - ID("6"), - dockerLabels(map[string]*string{ - label.TraefikPort: aws.String("666"), - label.TraefikProtocol: aws.String("https"), - label.TraefikWeight: aws.String("12"), - - label.TraefikBackend: aws.String("foobar"), - - label.TraefikBackendCircuitBreakerExpression: aws.String("NetworkErrorRatio() > 0.5"), - label.TraefikBackendResponseForwardingFlushInterval: aws.String("10ms"), - label.TraefikBackendHealthCheckScheme: aws.String("http"), - label.TraefikBackendHealthCheckPath: aws.String("/health"), 
- label.TraefikBackendHealthCheckPort: aws.String("880"), - label.TraefikBackendHealthCheckInterval: aws.String("6"), - label.TraefikBackendHealthCheckTimeout: aws.String("3"), - label.TraefikBackendHealthCheckHostname: aws.String("foo.com"), - label.TraefikBackendHealthCheckHeaders: aws.String("Foo:bar || Bar:foo"), - label.TraefikBackendLoadBalancerMethod: aws.String("drr"), - label.TraefikBackendLoadBalancerStickiness: aws.String("true"), - label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"), - label.TraefikBackendMaxConnAmount: aws.String("666"), - label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"), - label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemResponseBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingMaxRequestBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemRequestBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingRetryExpression: aws.String("IsNetworkError() && Attempts() <= 2"), - - label.TraefikFrontendPassTLSClientCertPem: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosNotBefore: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosNotAfter: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSans: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerCountry: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerLocality: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerProvince: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectCountry: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectLocality: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectProvince: aws.String("true"), - label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: aws.String("true"), - - label.TraefikFrontendAuthBasic: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendAuthBasicRemoveHeader: aws.String("true"), - label.TraefikFrontendAuthBasicUsers: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendAuthBasicUsersFile: aws.String(".htpasswd"), - label.TraefikFrontendAuthDigestRemoveHeader: aws.String("true"), - label.TraefikFrontendAuthDigestUsers: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendAuthDigestUsersFile: aws.String(".htpasswd"), - label.TraefikFrontendAuthForwardAddress: aws.String("auth.server"), - label.TraefikFrontendAuthForwardTrustForwardHeader: aws.String("true"), - label.TraefikFrontendAuthForwardTLSCa: aws.String("ca.crt"), - label.TraefikFrontendAuthForwardTLSCaOptional: aws.String("true"), - label.TraefikFrontendAuthForwardTLSCert: aws.String("server.crt"), - 
label.TraefikFrontendAuthForwardTLSKey: aws.String("server.key"), - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify: aws.String("true"), - label.TraefikFrontendAuthHeaderField: aws.String("X-WebAuth-User"), - - label.TraefikFrontendEntryPoints: aws.String("http,https"), - label.TraefikFrontendPassHostHeader: aws.String("true"), - label.TraefikFrontendPassTLSCert: aws.String("true"), - label.TraefikFrontendPriority: aws.String("666"), - label.TraefikFrontendRedirectEntryPoint: aws.String("https"), - label.TraefikFrontendRedirectRegex: aws.String("nope"), - label.TraefikFrontendRedirectReplacement: aws.String("nope"), - label.TraefikFrontendRedirectPermanent: aws.String("true"), - label.TraefikFrontendRule: aws.String("Host:traefik.io"), - label.TraefikFrontendWhiteListSourceRange: aws.String("10.10.10.10"), - label.TraefikFrontendWhiteListIPStrategyExcludedIPS: aws.String("10.10.10.10,10.10.10.11"), - label.TraefikFrontendWhiteListIPStrategyDepth: aws.String("5"), - - label.TraefikFrontendRequestHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendResponseHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendSSLProxyHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendAllowedHosts: aws.String("foo,bar,bor"), - label.TraefikFrontendHostsProxyHeaders: aws.String("foo,bar,bor"), - label.TraefikFrontendSSLHost: aws.String("foo"), - label.TraefikFrontendCustomFrameOptionsValue: aws.String("foo"), - label.TraefikFrontendContentSecurityPolicy: aws.String("foo"), - label.TraefikFrontendPublicKey: aws.String("foo"), - label.TraefikFrontendReferrerPolicy: aws.String("foo"), - label.TraefikFrontendCustomBrowserXSSValue: aws.String("foo"), - label.TraefikFrontendSTSSeconds: aws.String("666"), - label.TraefikFrontendSSLForceHost: aws.String("true"), - label.TraefikFrontendSSLRedirect: aws.String("true"), - label.TraefikFrontendSSLTemporaryRedirect: aws.String("true"), - label.TraefikFrontendSTSIncludeSubdomains: aws.String("true"), - label.TraefikFrontendSTSPreload: aws.String("true"), - label.TraefikFrontendForceSTSHeader: aws.String("true"), - label.TraefikFrontendFrameDeny: aws.String("true"), - label.TraefikFrontendContentTypeNosniff: aws.String("true"), - label.TraefikFrontendBrowserXSSFilter: aws.String("true"), - label.TraefikFrontendIsDevelopment: aws.String("true"), - - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: aws.String("404"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: aws.String("foo_query"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: aws.String("500,600"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: aws.String("bar_query"), - - label.TraefikFrontendRateLimitExtractorFunc: aws.String("client.ip"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "foo." 
+ label.SuffixRateLimitAverage: aws.String("12"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: aws.String("18"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: aws.String("3"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitBurst: aws.String("9"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-foobar": { - Servers: map[string]types.Server{ - "server-testing-instance-6": { - URL: "https://10.0.0.1:666", - Weight: 12, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - ResponseForwarding: &types.ResponseForwarding{ - FlushInterval: "10ms", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-foobar": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-foobar", - Routes: map[string]types.Route{ - "route-frontend-foobar": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{"10.10.10.10"}, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"}, - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", 
- "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "bar": { - Status: []string{ - "500", - "600", - }, - Backend: "backend-foobar", - Query: "bar_query", - }, - "foo": { - Status: []string{ - "404", - }, - Backend: "backend-foobar", - Query: "foo_query", - }, - }, - RateLimit: &types.RateLimit{ - RateSet: map[string]*types.Rate{ - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - }, - ExtractorFunc: "client.ip", - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - }, - }, - { - desc: "Containers with same backend name", - instances: []ecsInstance{ - instance( - name("testing-instance-v1"), - ID("6"), - dockerLabels(map[string]*string{ - label.TraefikPort: aws.String("666"), - label.TraefikProtocol: aws.String("https"), - label.TraefikWeight: aws.String("12"), - - label.TraefikBackend: aws.String("foobar"), - - label.TraefikBackendCircuitBreakerExpression: aws.String("NetworkErrorRatio() > 0.5"), - label.TraefikBackendHealthCheckScheme: aws.String("http"), - label.TraefikBackendHealthCheckPath: aws.String("/health"), - label.TraefikBackendHealthCheckPort: aws.String("880"), - label.TraefikBackendHealthCheckInterval: aws.String("6"), - label.TraefikBackendHealthCheckTimeout: aws.String("3"), - label.TraefikBackendHealthCheckHostname: aws.String("foo.com"), - label.TraefikBackendHealthCheckHeaders: aws.String("Foo:bar || Bar:foo"), - label.TraefikBackendLoadBalancerMethod: aws.String("drr"), - label.TraefikBackendLoadBalancerStickiness: aws.String("true"), - label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"), - label.TraefikBackendMaxConnAmount: aws.String("666"), - label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"), - label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemResponseBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingMaxRequestBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemRequestBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingRetryExpression: aws.String("IsNetworkError() && Attempts() <= 2"), - - label.TraefikFrontendAuthBasicUsers: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendEntryPoints: aws.String("http,https"), - label.TraefikFrontendPassHostHeader: aws.String("true"), - label.TraefikFrontendPassTLSCert: aws.String("true"), - label.TraefikFrontendPriority: aws.String("666"), - label.TraefikFrontendRedirectEntryPoint: aws.String("https"), - label.TraefikFrontendRedirectRegex: aws.String("nope"), - label.TraefikFrontendRedirectReplacement: aws.String("nope"), - label.TraefikFrontendRedirectPermanent: aws.String("true"), - label.TraefikFrontendRule: aws.String("Host:traefik.io"), - label.TraefikFrontendWhiteListSourceRange: aws.String("10.10.10.10"), - - label.TraefikFrontendRequestHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; 
charset=utf-8"), - label.TraefikFrontendResponseHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendSSLProxyHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendAllowedHosts: aws.String("foo,bar,bor"), - label.TraefikFrontendHostsProxyHeaders: aws.String("foo,bar,bor"), - label.TraefikFrontendSSLHost: aws.String("foo"), - label.TraefikFrontendCustomFrameOptionsValue: aws.String("foo"), - label.TraefikFrontendContentSecurityPolicy: aws.String("foo"), - label.TraefikFrontendPublicKey: aws.String("foo"), - label.TraefikFrontendReferrerPolicy: aws.String("foo"), - label.TraefikFrontendCustomBrowserXSSValue: aws.String("foo"), - label.TraefikFrontendSTSSeconds: aws.String("666"), - label.TraefikFrontendSSLForceHost: aws.String("true"), - label.TraefikFrontendSSLRedirect: aws.String("true"), - label.TraefikFrontendSSLTemporaryRedirect: aws.String("true"), - label.TraefikFrontendSTSIncludeSubdomains: aws.String("true"), - label.TraefikFrontendSTSPreload: aws.String("true"), - label.TraefikFrontendForceSTSHeader: aws.String("true"), - label.TraefikFrontendFrameDeny: aws.String("true"), - label.TraefikFrontendContentTypeNosniff: aws.String("true"), - label.TraefikFrontendBrowserXSSFilter: aws.String("true"), - label.TraefikFrontendIsDevelopment: aws.String("true"), - - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: aws.String("404"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: aws.String("foo_query"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: aws.String("500,600"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: aws.String("bar_query"), - - label.TraefikFrontendRateLimitExtractorFunc: aws.String("client.ip"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: aws.String("12"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: aws.String("18"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: aws.String("3"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "bar." 
+ label.SuffixRateLimitBurst: aws.String("9"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.0.0.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - instance( - name("testing-instance-v2"), - ID("6"), - dockerLabels(map[string]*string{ - label.TraefikPort: aws.String("555"), - label.TraefikProtocol: aws.String("https"), - label.TraefikWeight: aws.String("15"), - - label.TraefikBackend: aws.String("foobar"), - - label.TraefikBackendCircuitBreakerExpression: aws.String("NetworkErrorRatio() > 0.5"), - label.TraefikBackendHealthCheckScheme: aws.String("http"), - label.TraefikBackendHealthCheckPath: aws.String("/health"), - label.TraefikBackendHealthCheckPort: aws.String("880"), - label.TraefikBackendHealthCheckInterval: aws.String("6"), - label.TraefikBackendHealthCheckTimeout: aws.String("3"), - label.TraefikBackendHealthCheckHostname: aws.String("bar.com"), - label.TraefikBackendHealthCheckHeaders: aws.String("Foo:bar || Bar:foo"), - label.TraefikBackendLoadBalancerMethod: aws.String("drr"), - label.TraefikBackendLoadBalancerStickiness: aws.String("true"), - label.TraefikBackendLoadBalancerStickinessCookieName: aws.String("chocolate"), - label.TraefikBackendMaxConnAmount: aws.String("666"), - label.TraefikBackendMaxConnExtractorFunc: aws.String("client.ip"), - label.TraefikBackendBufferingMaxResponseBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemResponseBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingMaxRequestBodyBytes: aws.String("10485760"), - label.TraefikBackendBufferingMemRequestBodyBytes: aws.String("2097152"), - label.TraefikBackendBufferingRetryExpression: aws.String("IsNetworkError() && Attempts() <= 2"), - - label.TraefikFrontendAuthBasic: aws.String("test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - label.TraefikFrontendEntryPoints: aws.String("http,https"), - label.TraefikFrontendPassHostHeader: aws.String("true"), - label.TraefikFrontendPassTLSCert: aws.String("true"), - label.TraefikFrontendPriority: aws.String("666"), - label.TraefikFrontendRedirectEntryPoint: aws.String("https"), - label.TraefikFrontendRedirectRegex: aws.String("nope"), - label.TraefikFrontendRedirectReplacement: aws.String("nope"), - label.TraefikFrontendRedirectPermanent: aws.String("true"), - label.TraefikFrontendRule: aws.String("Host:traefik.io"), - label.TraefikFrontendWhiteListSourceRange: aws.String("10.10.10.10"), - - label.TraefikFrontendRequestHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendResponseHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendSSLProxyHeaders: aws.String("Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8"), - label.TraefikFrontendAllowedHosts: aws.String("foo,bar,bor"), - label.TraefikFrontendHostsProxyHeaders: aws.String("foo,bar,bor"), - label.TraefikFrontendSSLHost: aws.String("foo"), - label.TraefikFrontendCustomFrameOptionsValue: aws.String("foo"), - label.TraefikFrontendContentSecurityPolicy: aws.String("foo"), - label.TraefikFrontendPublicKey: aws.String("foo"), - label.TraefikFrontendReferrerPolicy: aws.String("foo"), - label.TraefikFrontendCustomBrowserXSSValue: aws.String("foo"), - label.TraefikFrontendSTSSeconds: aws.String("666"), - label.TraefikFrontendSSLForceHost: aws.String("true"), - label.TraefikFrontendSSLRedirect: 
aws.String("true"), - label.TraefikFrontendSSLTemporaryRedirect: aws.String("true"), - label.TraefikFrontendSTSIncludeSubdomains: aws.String("true"), - label.TraefikFrontendSTSPreload: aws.String("true"), - label.TraefikFrontendForceSTSHeader: aws.String("true"), - label.TraefikFrontendFrameDeny: aws.String("true"), - label.TraefikFrontendContentTypeNosniff: aws.String("true"), - label.TraefikFrontendBrowserXSSFilter: aws.String("true"), - label.TraefikFrontendIsDevelopment: aws.String("true"), - - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: aws.String("404"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: aws.String("foo_query"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: aws.String("500,600"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: aws.String("foobar"), - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: aws.String("bar_query"), - - label.TraefikFrontendRateLimitExtractorFunc: aws.String("client.ip"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: aws.String("12"), - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: aws.String("18"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: aws.String("3"), - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: aws.String("6"), - label.Prefix + label.BaseFrontendRateLimit + "bar." 
+ label.SuffixRateLimitBurst: aws.String("9"), - }), - iMachine( - mState(ec2.InstanceStateNameRunning), - mPrivateIP("10.2.2.1"), - mPorts( - mPort(0, 1337), - ), - ), - ), - }, - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend-foobar": { - Servers: map[string]types.Server{ - "server-testing-instance-v1-6": { - URL: "https://10.0.0.1:666", - Weight: 12, - }, - "server-testing-instance-v2-6": { - URL: "https://10.2.2.1:555", - Weight: 15, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend-foobar": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-foobar", - Routes: map[string]types.Route{ - "route-frontend-foobar": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{"10.10.10.10"}, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "bar": { - Status: []string{ - "500", - "600", - }, - Backend: "backend-foobar", - Query: "bar_query", - }, - "foo": { - Status: []string{ - "404", - }, - Backend: "backend-foobar", - Query: "foo_query", - }, - }, - RateLimit: &types.RateLimit{ - RateSet: map[string]*types.Rate{ - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - }, - ExtractorFunc: "client.ip", - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, 
func(t *testing.T) { - t.Parallel() - - p := &Provider{ExposedByDefault: true} - - instances := fakeLoadTraefikLabels(test.instances) - - got, err := p.buildConfiguration(instances) - assert.Equal(t, test.err, err) // , err.Error() - assert.Equal(t, test.expected, got, test.desc) - }) - } -} - -func TestFilterInstance(t *testing.T) { - testCases := []struct { - desc string - instanceInfo ecsInstance - exposedByDefault bool - expected bool - constrain bool - }{ - { - desc: "Instance without enable label and exposed by default enabled should be not filtered", - instanceInfo: simpleEcsInstance(map[string]*string{}), - exposedByDefault: true, - expected: true, - }, - { - desc: "Instance without enable label and exposed by default disabled should be filtered", - instanceInfo: simpleEcsInstance(map[string]*string{}), - exposedByDefault: false, - expected: false, - }, - { - desc: "Instance with enable label set to false and exposed by default enabled should be filtered", - instanceInfo: simpleEcsInstance(map[string]*string{ - label.TraefikEnable: aws.String("false"), - }), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with enable label set to true and exposed by default disabled should be not filtered", - instanceInfo: simpleEcsInstance(map[string]*string{ - label.TraefikEnable: aws.String("true"), - }), - exposedByDefault: false, - expected: true, - }, - { - desc: "Instance with empty private ip and exposed by default enabled should be filtered", - instanceInfo: func() ecsInstance { - nilPrivateIP := simpleEcsInstance(map[string]*string{}) - nilPrivateIP.machine.privateIP = "" - return nilPrivateIP - }(), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with nil machine and exposed by default enabled should be filtered", - instanceInfo: func() ecsInstance { - nilMachine := simpleEcsInstance(map[string]*string{}) - nilMachine.machine = nil - return nilMachine - }(), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with empty machine state and exposed by default enabled should be filtered", - instanceInfo: func() ecsInstance { - nilMachineState := simpleEcsInstance(map[string]*string{}) - nilMachineState.machine.state = "" - return nilMachineState - }(), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with invalid machine state and exposed by default enabled should be filtered", - instanceInfo: func() ecsInstance { - invalidMachineState := simpleEcsInstance(map[string]*string{}) - invalidMachineState.machine.state = ec2.InstanceStateNameStopped - return invalidMachineState - }(), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with no port mappings should be filtered", - instanceInfo: simpleEcsInstanceNoNetwork(map[string]*string{}), - exposedByDefault: true, - expected: false, - }, - { - desc: "Instance with no port mapping and with label should not be filtered", - instanceInfo: simpleEcsInstanceNoNetwork(map[string]*string{ - label.TraefikPort: aws.String("80"), - }), - exposedByDefault: true, - expected: true, - }, - { - desc: "Instance with failing constraint should be filtered", - instanceInfo: simpleEcsInstance(map[string]*string{ - label.TraefikTags: aws.String("private"), - }), - exposedByDefault: true, - expected: false, - constrain: true, - }, - { - desc: "Instance with passing constraint should not be filtered", - instanceInfo: simpleEcsInstance(map[string]*string{ - label.TraefikTags: aws.String("public"), - }), - exposedByDefault: true, - expected: true, - constrain: 
true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - prov := &Provider{ - ExposedByDefault: test.exposedByDefault, - } - if test.constrain { - constraints := types.Constraints{} - assert.NoError(t, constraints.Set("tag==public")) - prov.Constraints = constraints - } - - actual := prov.filterInstance(test.instanceInfo) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetHost(t *testing.T) { - testCases := []struct { - desc string - expected string - instanceInfo ecsInstance - }{ - { - desc: "Default host should be 10.0.0.0", - expected: "10.0.0.0", - instanceInfo: simpleEcsInstance(map[string]*string{}), - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getHost(test.instanceInfo) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetPort(t *testing.T) { - testCases := []struct { - desc string - expected string - instanceInfo ecsInstance - }{ - { - desc: "Default port should be 80", - expected: "80", - instanceInfo: simpleEcsInstance(map[string]*string{}), - }, - { - desc: "Label should override network port", - expected: "4242", - instanceInfo: simpleEcsInstance(map[string]*string{ - label.TraefikPort: aws.String("4242"), - }), - }, - { - desc: "Label should provide exposed port", - expected: "80", - instanceInfo: simpleEcsInstanceNoNetwork(map[string]*string{ - label.TraefikPort: aws.String("80"), - }), - }, - { - desc: "Container label should provide exposed port", - expected: "6536", - instanceInfo: simpleEcsInstanceDynamicPorts(map[string]*string{ - label.TraefikPort: aws.String("8080"), - }), - }, - { - desc: "Wrong port container label should provide default exposed port", - expected: "9000", - instanceInfo: simpleEcsInstanceDynamicPorts(map[string]*string{ - label.TraefikPort: aws.String("9000"), - }), - }, - { - desc: "Invalid port container label should provide default exposed port", - expected: "6535", - instanceInfo: simpleEcsInstanceDynamicPorts(map[string]*string{ - label.TraefikPort: aws.String("foo"), - }), - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getPort(test.instanceInfo) - assert.Equal(t, test.expected, actual) - }) - } -} - -func makeEcsInstance(containerDef *ecs.ContainerDefinition) ecsInstance { - container := &ecs.Container{ - Name: containerDef.Name, - NetworkBindings: make([]*ecs.NetworkBinding, len(containerDef.PortMappings)), - } - - for i, pm := range containerDef.PortMappings { - container.NetworkBindings[i] = &ecs.NetworkBinding{ - HostPort: pm.HostPort, - ContainerPort: pm.ContainerPort, - Protocol: pm.Protocol, - BindIP: aws.String("0.0.0.0"), - } - } - - instance := ecsInstance{ - Name: "foo-http", - ID: "123456789abc", - containerDefinition: containerDef, - machine: &machine{ - state: ec2.InstanceStateNameRunning, - privateIP: "10.0.0.0", - ports: []portMapping{{hostPort: 1337}}, - }, - } - - if containerDef != nil { - instance.TraefikLabels = aws.StringValueMap(containerDef.DockerLabels) - } - - return instance -} - -func simpleEcsInstance(labels map[string]*string) ecsInstance { - instance := makeEcsInstance(&ecs.ContainerDefinition{ - Name: aws.String("http"), - DockerLabels: labels, - }) - instance.machine.ports = []portMapping{{hostPort: 80}} - return instance -} - -func simpleEcsInstanceNoNetwork(labels map[string]*string) ecsInstance { - instance := 
makeEcsInstance(&ecs.ContainerDefinition{ - Name: aws.String("http"), - DockerLabels: labels, - }) - instance.machine.ports = []portMapping{} - return instance -} - -func simpleEcsInstanceDynamicPorts(labels map[string]*string) ecsInstance { - instance := makeEcsInstance(&ecs.ContainerDefinition{ - Name: aws.String("http"), - DockerLabels: labels, - }) - instance.machine.ports = []portMapping{ - { - containerPort: 80, - hostPort: 6535, - }, - { - containerPort: 8080, - hostPort: 6536, - }, - } - return instance -} - -func fakeLoadTraefikLabels(instances []ecsInstance) []ecsInstance { - var result []ecsInstance - for _, instance := range instances { - instance.TraefikLabels = aws.StringValueMap(instance.containerDefinition.DockerLabels) - result = append(result, instance) - } - return result -} diff --git a/old/provider/ecs/ecs.go b/old/provider/ecs/ecs.go deleted file mode 100644 index f6e9d5ee0..000000000 --- a/old/provider/ecs/ecs.go +++ /dev/null @@ -1,442 +0,0 @@ -package ecs - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/ecs" - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" - "github.com/patrickmn/go-cache" -) - -var _ provider.Provider = (*Provider)(nil) -var existingTaskDefCache = cache.New(30*time.Minute, 5*time.Minute) - -// Provider holds configurations of the provider. 
-type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - - Domain string `description:"Default domain used"` - ExposedByDefault bool `description:"Expose containers by default" export:"true"` - RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"` - - // Provider lookup parameters - Clusters Clusters `description:"ECS Clusters name"` - AutoDiscoverClusters bool `description:"Auto discover cluster" export:"true"` - Region string `description:"The AWS region to use for requests" export:"true"` - AccessKeyID string `description:"The AWS credentials access key to use for making requests"` - SecretAccessKey string `description:"The AWS credentials access key to use for making requests"` -} - -type ecsInstance struct { - Name string - ID string - containerDefinition *ecs.ContainerDefinition - machine *machine - TraefikLabels map[string]string - SegmentLabels map[string]string - SegmentName string -} - -type portMapping struct { - containerPort int64 - hostPort int64 -} - -type machine struct { - state string - privateIP string - ports []portMapping -} - -type awsClient struct { - ecs *ecs.ECS - ec2 *ec2.EC2 -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - return p.BaseProvider.Init(constraints) -} - -func (p *Provider) createClient() (*awsClient, error) { - sess, err := session.NewSession() - if err != nil { - return nil, err - } - - ec2meta := ec2metadata.New(sess) - if p.Region == "" { - log.Infoln("No EC2 region provided, querying instance metadata endpoint...") - identity, err := ec2meta.GetInstanceIdentityDocument() - if err != nil { - return nil, err - } - p.Region = identity.Region - } - - cfg := &aws.Config{ - Region: &p.Region, - Credentials: credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.StaticProvider{ - Value: credentials.Value{ - AccessKeyID: p.AccessKeyID, - SecretAccessKey: p.SecretAccessKey, - }, - }, - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{}, - defaults.RemoteCredProvider(*(defaults.Config()), defaults.Handlers()), - }), - } - - if p.Trace { - cfg.WithLogger(aws.LoggerFunc(func(args ...interface{}) { - log.Debug(args...) - })) - } - - return &awsClient{ - ecs: ecs.New(sess, cfg), - ec2: ec2.New(sess, cfg), - }, nil -} - -// Provide allows the ecs provider to provide configurations to traefik -// using the given configuration channel. 
-func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - handleCanceled := func(ctx context.Context, err error) error { - if ctx.Err() == context.Canceled || err == context.Canceled { - return nil - } - return err - } - - pool.Go(func(stop chan bool) { - ctx, cancel := context.WithCancel(context.Background()) - safe.Go(func() { - <-stop - cancel() - }) - - operation := func() error { - awsClient, err := p.createClient() - if err != nil { - return err - } - - configuration, err := p.loadECSConfig(ctx, awsClient) - if err != nil { - return handleCanceled(ctx, err) - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "ecs", - Configuration: configuration, - } - - if p.Watch { - reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - defer reload.Stop() - for { - select { - case <-reload.C: - configuration, err := p.loadECSConfig(ctx, awsClient) - if err != nil { - return handleCanceled(ctx, err) - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "ecs", - Configuration: configuration, - } - case <-ctx.Done(): - return handleCanceled(ctx, ctx.Err()) - } - } - } - - return nil - } - - notify := func(err error, time time.Duration) { - log.Errorf("Provider connection error %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Cannot connect to Provider api %+v", err) - } - }) - - return nil -} - -// Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels) -// and the EC2 instance data -func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) { - var clustersArn []*string - var clusters Clusters - - if p.AutoDiscoverClusters { - input := &ecs.ListClustersInput{} - for { - result, err := client.ecs.ListClusters(input) - if err != nil { - return nil, err - } - if result != nil { - clustersArn = append(clustersArn, result.ClusterArns...) - input.NextToken = result.NextToken - if result.NextToken == nil { - break - } - } else { - break - } - } - for _, cArn := range clustersArn { - clusters = append(clusters, *cArn) - } - } else { - clusters = p.Clusters - } - - var instances []ecsInstance - - log.Debugf("ECS Clusters: %s", clusters) - - for _, c := range clusters { - - input := &ecs.ListTasksInput{ - Cluster: &c, - DesiredStatus: aws.String(ecs.DesiredStatusRunning), - } - tasks := make(map[string]*ecs.Task) - err := client.ecs.ListTasksPagesWithContext(ctx, input, func(page *ecs.ListTasksOutput, lastPage bool) bool { - if len(page.TaskArns) > 0 { - resp, err := client.ecs.DescribeTasksWithContext(ctx, &ecs.DescribeTasksInput{ - Tasks: page.TaskArns, - Cluster: &c, - }) - if err != nil { - log.Errorf("Unable to describe tasks for %v", page.TaskArns) - } else { - for _, t := range resp.Tasks { - if aws.StringValue(t.LastStatus) == ecs.DesiredStatusRunning { - tasks[aws.StringValue(t.TaskArn)] = t - } - } - } - } - return !lastPage - }) - - if err != nil { - log.Error("Unable to list tasks") - return nil, err - } - - // Skip to the next cluster if there are no tasks found on - // this cluster. 
- if len(tasks) == 0 { - continue - } - - ec2Instances, err := p.lookupEc2Instances(ctx, client, &c, tasks) - if err != nil { - return nil, err - } - - taskDefinitions, err := p.lookupTaskDefinitions(ctx, client, tasks) - if err != nil { - return nil, err - } - - for key, task := range tasks { - - containerInstance := ec2Instances[aws.StringValue(task.ContainerInstanceArn)] - taskDef := taskDefinitions[key] - - for _, container := range task.Containers { - - var containerDefinition *ecs.ContainerDefinition - for _, def := range taskDef.ContainerDefinitions { - if aws.StringValue(container.Name) == aws.StringValue(def.Name) { - containerDefinition = def - break - } - } - - if containerDefinition == nil { - log.Debugf("Unable to find container definition for %s", aws.StringValue(container.Name)) - continue - } - - var mach *machine - if len(task.Attachments) != 0 { - var ports []portMapping - for _, mapping := range containerDefinition.PortMappings { - if mapping != nil { - ports = append(ports, portMapping{ - hostPort: aws.Int64Value(mapping.HostPort), - containerPort: aws.Int64Value(mapping.ContainerPort), - }) - } - } - mach = &machine{ - privateIP: aws.StringValue(container.NetworkInterfaces[0].PrivateIpv4Address), - ports: ports, - state: aws.StringValue(task.LastStatus), - } - } else { - var ports []portMapping - for _, mapping := range container.NetworkBindings { - if mapping != nil { - ports = append(ports, portMapping{ - hostPort: aws.Int64Value(mapping.HostPort), - containerPort: aws.Int64Value(mapping.ContainerPort), - }) - } - } - mach = &machine{ - privateIP: aws.StringValue(containerInstance.PrivateIpAddress), - ports: ports, - state: aws.StringValue(containerInstance.State.Name), - } - } - - instances = append(instances, ecsInstance{ - Name: fmt.Sprintf("%s-%s", strings.Replace(aws.StringValue(task.Group), ":", "-", 1), *container.Name), - ID: key[len(key)-12:], - containerDefinition: containerDefinition, - machine: mach, - TraefikLabels: aws.StringValueMap(containerDefinition.DockerLabels), - }) - } - } - } - - return instances, nil -} - -func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, clusterName *string, ecsDatas map[string]*ecs.Task) (map[string]*ec2.Instance, error) { - - instanceIds := make(map[string]string) - ec2Instances := make(map[string]*ec2.Instance) - - var containerInstancesArns []*string - var instanceArns []*string - - for _, task := range ecsDatas { - if task.ContainerInstanceArn != nil { - containerInstancesArns = append(containerInstancesArns, task.ContainerInstanceArn) - } - } - - for _, arns := range p.chunkIDs(containerInstancesArns) { - resp, err := client.ecs.DescribeContainerInstancesWithContext(ctx, &ecs.DescribeContainerInstancesInput{ - ContainerInstances: arns, - Cluster: clusterName, - }) - - if err != nil { - log.Errorf("Unable to describe container instances: %v", err) - return nil, err - } - - for _, container := range resp.ContainerInstances { - instanceIds[aws.StringValue(container.Ec2InstanceId)] = aws.StringValue(container.ContainerInstanceArn) - instanceArns = append(instanceArns, container.Ec2InstanceId) - } - } - - if len(instanceArns) > 0 { - for _, ids := range p.chunkIDs(instanceArns) { - input := &ec2.DescribeInstancesInput{ - InstanceIds: ids, - } - - err := client.ec2.DescribeInstancesPagesWithContext(ctx, input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool { - if len(page.Reservations) > 0 { - for _, r := range page.Reservations { - for _, i := range r.Instances { - if i.InstanceId != 
nil { - ec2Instances[instanceIds[aws.StringValue(i.InstanceId)]] = i - } - } - } - } - return !lastPage - }) - - if err != nil { - log.Errorf("Unable to describe instances: %v", err) - return nil, err - } - } - } - - return ec2Instances, nil -} - -func (p *Provider) lookupTaskDefinitions(ctx context.Context, client *awsClient, taskDefArns map[string]*ecs.Task) (map[string]*ecs.TaskDefinition, error) { - taskDef := make(map[string]*ecs.TaskDefinition) - for arn, task := range taskDefArns { - if definition, ok := existingTaskDefCache.Get(arn); ok { - taskDef[arn] = definition.(*ecs.TaskDefinition) - log.Debugf("Found cached task definition for %s. Skipping the call", arn) - } else { - resp, err := client.ecs.DescribeTaskDefinitionWithContext(ctx, &ecs.DescribeTaskDefinitionInput{ - TaskDefinition: task.TaskDefinitionArn, - }) - - if err != nil { - log.Errorf("Unable to describe task definition: %s", err) - return nil, err - } - - taskDef[arn] = resp.TaskDefinition - existingTaskDefCache.Set(arn, resp.TaskDefinition, cache.DefaultExpiration) - } - } - return taskDef, nil -} - -func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*types.Configuration, error) { - instances, err := p.listInstances(ctx, client) - if err != nil { - return nil, err - } - - return p.buildConfiguration(instances) -} - -// chunkIDs ECS expects no more than 100 parameters be passed to a API call; -// thus, pack each string into an array capped at 100 elements -func (p *Provider) chunkIDs(ids []*string) [][]*string { - var chuncked [][]*string - for i := 0; i < len(ids); i += 100 { - var sliceEnd int - if i+100 < len(ids) { - sliceEnd = i + 100 - } else { - sliceEnd = len(ids) - } - chuncked = append(chuncked, ids[i:sliceEnd]) - } - return chuncked -} diff --git a/old/provider/ecs/ecs_test.go b/old/provider/ecs/ecs_test.go deleted file mode 100644 index f55510305..000000000 --- a/old/provider/ecs/ecs_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package ecs - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/stretchr/testify/assert" -) - -func TestChunkIDs(t *testing.T) { - provider := &Provider{} - - testCases := []struct { - desc string - count int - expected []int - }{ - { - desc: "0 element", - count: 0, - expected: []int(nil), - }, - { - desc: "1 element", - count: 1, - expected: []int{1}, - }, - { - desc: "99 elements, 1 chunk", - count: 99, - expected: []int{99}, - }, - { - desc: "100 elements, 1 chunk", - count: 100, - expected: []int{100}, - }, - { - desc: "101 elements, 2 chunks", - count: 101, - expected: []int{100, 1}, - }, - { - desc: "199 elements, 2 chunks", - count: 199, - expected: []int{100, 99}, - }, - { - desc: "200 elements, 2 chunks", - count: 200, - expected: []int{100, 100}, - }, - { - desc: "201 elements, 3 chunks", - count: 201, - expected: []int{100, 100, 1}, - }, - { - desc: "555 elements, 5 chunks", - count: 555, - expected: []int{100, 100, 100, 100, 100, 55}, - }, - { - desc: "1001 elements, 11 chunks", - count: 1001, - expected: []int{100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 1}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var IDs []*string - for v := 0; v < test.count; v++ { - IDs = append(IDs, aws.String("a")) - } - - var outCount []int - for _, el := range provider.chunkIDs(IDs) { - outCount = append(outCount, len(el)) - } - - assert.Equal(t, test.expected, outCount) - }) - } -} diff --git a/old/provider/etcd/etcd.go b/old/provider/etcd/etcd.go deleted file mode 
100644 index bf045ff0e..000000000 --- a/old/provider/etcd/etcd.go +++ /dev/null @@ -1,48 +0,0 @@ -package etcd - -import ( - "fmt" - - "github.com/abronan/valkeyrie/store" - etcdv3 "github.com/abronan/valkeyrie/store/etcd/v3" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/kv" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the provider. -type Provider struct { - kv.Provider `mapstructure:",squash" export:"true"` -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.Provider.Init(constraints) - if err != nil { - return err - } - - store, err := p.CreateStore() - if err != nil { - return fmt.Errorf("failed to Connect to KV store: %v", err) - } - - p.SetKVClient(store) - return nil -} - -// Provide allows the etcd provider to Provide configurations to traefik -// using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - return p.Provider.Provide(configurationChan, pool) -} - -// CreateStore creates the KV store -func (p *Provider) CreateStore() (store.Store, error) { - etcdv3.Register() - p.SetStoreType(store.ETCDV3) - return p.Provider.CreateStore() -} diff --git a/old/provider/eureka/config.go b/old/provider/eureka/config.go deleted file mode 100644 index 7f1a71494..000000000 --- a/old/provider/eureka/config.go +++ /dev/null @@ -1,57 +0,0 @@ -package eureka - -import ( - "strconv" - "text/template" - - "github.com/ArthurHlt/go-eureka-client/eureka" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" -) - -// Build the configuration from Provider server -func (p *Provider) buildConfiguration(apps *eureka.Applications) (*types.Configuration, error) { - var eurekaFuncMap = template.FuncMap{ - "getPort": getPort, - "getProtocol": getProtocol, - "getWeight": getWeight, - "getInstanceID": getInstanceID, - } - - templateObjects := struct { - Applications []eureka.Application - }{ - Applications: apps.Applications, - } - - configuration, err := p.GetConfiguration("templates/eureka.tmpl", eurekaFuncMap, templateObjects) - if err != nil { - log.Error(err) - } - return configuration, nil -} - -func getInstanceID(instance eureka.InstanceInfo) string { - defaultID := provider.Normalize(instance.IpAddr) + "-" + getPort(instance) - return label.GetStringValue(instance.Metadata.Map, label.TraefikBackendID, defaultID) -} - -func getPort(instance eureka.InstanceInfo) string { - if instance.SecurePort.Enabled { - return strconv.Itoa(instance.SecurePort.Port) - } - return strconv.Itoa(instance.Port.Port) -} - -func getProtocol(instance eureka.InstanceInfo) string { - if instance.SecurePort.Enabled { - return "https" - } - return label.DefaultProtocol -} - -func getWeight(instance eureka.InstanceInfo) int { - return label.GetIntValue(instance.Metadata.Map, label.TraefikWeight, label.DefaultWeight) -} diff --git a/old/provider/eureka/config_test.go b/old/provider/eureka/config_test.go deleted file mode 100644 index 101b4ad17..000000000 --- a/old/provider/eureka/config_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package eureka - -import ( - "strconv" - "testing" - - "github.com/ArthurHlt/go-eureka-client/eureka" - "github.com/containous/traefik/old/provider/label" - 
"github.com/stretchr/testify/assert" -) - -func TestGetPort(t *testing.T) { - testCases := []struct { - expectedPort string - instanceInfo eureka.InstanceInfo - }{ - { - expectedPort: "80", - instanceInfo: eureka.InstanceInfo{ - SecurePort: &eureka.Port{ - Port: 443, Enabled: false, - }, - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - }, - }, - { - expectedPort: "443", - instanceInfo: eureka.InstanceInfo{ - SecurePort: &eureka.Port{ - Port: 443, Enabled: true, - }, - Port: &eureka.Port{ - Port: 80, Enabled: false, - }, - }, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - port := getPort(test.instanceInfo) - assert.Equal(t, test.expectedPort, port) - }) - } -} - -func TestGetProtocol(t *testing.T) { - testCases := []struct { - expectedProtocol string - instanceInfo eureka.InstanceInfo - }{ - { - expectedProtocol: "http", - instanceInfo: eureka.InstanceInfo{ - SecurePort: &eureka.Port{ - Port: 443, Enabled: false, - }, - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - }, - }, - { - expectedProtocol: "https", - instanceInfo: eureka.InstanceInfo{ - SecurePort: &eureka.Port{ - Port: 443, Enabled: true, - }, - Port: &eureka.Port{ - Port: 80, Enabled: false, - }, - }, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - protocol := getProtocol(test.instanceInfo) - assert.Equal(t, test.expectedProtocol, protocol) - }) - } -} - -func TestGetWeight(t *testing.T) { - testCases := []struct { - expectedWeight int - instanceInfo eureka.InstanceInfo - }{ - { - expectedWeight: label.DefaultWeight, - instanceInfo: eureka.InstanceInfo{ - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - Metadata: &eureka.MetaData{ - Map: map[string]string{}, - }, - }, - }, - { - expectedWeight: 10, - instanceInfo: eureka.InstanceInfo{ - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - Metadata: &eureka.MetaData{ - Map: map[string]string{ - label.TraefikWeight: "10", - }, - }, - }, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - weight := getWeight(test.instanceInfo) - assert.Equal(t, test.expectedWeight, weight) - }) - } -} - -func TestGetInstanceId(t *testing.T) { - testCases := []struct { - expectedID string - instanceInfo eureka.InstanceInfo - }{ - { - expectedID: "MyInstanceId", - instanceInfo: eureka.InstanceInfo{ - IpAddr: "10.11.12.13", - SecurePort: &eureka.Port{ - Port: 443, Enabled: false, - }, - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - Metadata: &eureka.MetaData{ - Map: map[string]string{ - label.TraefikBackendID: "MyInstanceId", - }, - }, - }, - }, - { - expectedID: "10-11-12-13-80", - instanceInfo: eureka.InstanceInfo{ - IpAddr: "10.11.12.13", - SecurePort: &eureka.Port{ - Port: 443, Enabled: false, - }, - Port: &eureka.Port{ - Port: 80, Enabled: true, - }, - Metadata: &eureka.MetaData{ - Map: map[string]string{}, - }, - }, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - id := getInstanceID(test.instanceInfo) - assert.Equal(t, test.expectedID, id) - }) - } -} diff --git a/old/provider/eureka/eureka.go b/old/provider/eureka/eureka.go deleted file mode 100644 index fb7211b04..000000000 --- a/old/provider/eureka/eureka.go +++ /dev/null @@ -1,92 +0,0 @@ -package eureka - -import ( - "io/ioutil" - "time" - - "github.com/ArthurHlt/go-eureka-client/eureka" - 
"github.com/cenkalti/backoff" - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" -) - -// Provider holds configuration of the Provider provider. -type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - Endpoint string `description:"Eureka server endpoint"` - RefreshSeconds parse.Duration `description:"Override default configuration time between refresh" export:"true"` -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - return p.BaseProvider.Init(constraints) -} - -// Provide allows the eureka provider to provide configurations to traefik -// using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - eureka.GetLogger().SetOutput(ioutil.Discard) - - operation := func() error { - client := eureka.NewClient([]string{p.Endpoint}) - - applications, err := client.GetApplications() - if err != nil { - log.Errorf("Failed to retrieve applications, error: %s", err) - return err - } - - configuration, err := p.buildConfiguration(applications) - if err != nil { - log.Errorf("Failed to build configuration for Provider, error: %s", err) - return err - } - - configurationChan <- types.ConfigMessage{ - ProviderName: "eureka", - Configuration: configuration, - } - - ticker := time.NewTicker(time.Duration(p.RefreshSeconds)) - pool.Go(func(stop chan bool) { - for { - select { - case t := <-ticker.C: - log.Debugf("Refreshing Provider %s", t.String()) - applications, err := client.GetApplications() - if err != nil { - log.Errorf("Failed to retrieve applications, error: %s", err) - continue - } - configuration, err := p.buildConfiguration(applications) - if err != nil { - log.Errorf("Failed to refresh Provider configuration, error: %s", err) - continue - } - configurationChan <- types.ConfigMessage{ - ProviderName: "eureka", - Configuration: configuration, - } - case <-stop: - return - } - } - }) - return nil - } - - err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Cannot connect to Provider server %+v", err) - return err - } - return nil -} - -func notify(err error, time time.Duration) { - log.Errorf("Provider connection error %+v, retrying in %s", err, time) -} diff --git a/old/provider/kv/filler_test.go b/old/provider/kv/filler_test.go deleted file mode 100644 index 3660b23c9..000000000 --- a/old/provider/kv/filler_test.go +++ /dev/null @@ -1,203 +0,0 @@ -package kv - -import ( - "sort" - "strconv" - "strings" - "testing" - - "github.com/abronan/valkeyrie/store" - "github.com/stretchr/testify/assert" -) - -type ByKey []*store.KVPair - -func (a ByKey) Len() int { return len(a) } -func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByKey) Less(i, j int) bool { return a[i].Key < a[j].Key } - -func filler(prefix string, opts ...func(string, map[string]*store.KVPair)) []*store.KVPair { - buf := make(map[string]*store.KVPair) - for _, opt := range opts { - opt(prefix, buf) - } - - var result ByKey - for _, value := range buf { - result = append(result, value) - } - - sort.Sort(result) - return result -} - -func backend(name string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) { - return entry(pathBackends+name, opts...) 
-} - -func frontend(name string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) { - return entry(pathFrontends+name, opts...) -} - -func entry(root string, opts ...func(map[string]string)) func(string, map[string]*store.KVPair) { - return func(prefix string, pairs map[string]*store.KVPair) { - prefixedRoot := prefix + pathSeparator + strings.TrimPrefix(root, pathSeparator) - pairs[prefixedRoot] = &store.KVPair{Key: prefixedRoot, Value: []byte("")} - - transit := make(map[string]string) - for _, opt := range opts { - opt(transit) - } - - for key, value := range transit { - fill(pairs, prefixedRoot, key, value) - } - } -} - -func fill(pairs map[string]*store.KVPair, previous string, current string, value string) { - clean := strings.TrimPrefix(current, pathSeparator) - - i := strings.IndexRune(clean, '/') - if i > 0 { - key := previous + pathSeparator + clean[:i] - - if _, ok := pairs[key]; !ok || len(pairs[key].Value) == 0 { - pairs[key] = &store.KVPair{Key: key, Value: []byte("")} - } - - fill(pairs, key, clean[i:], value) - } - - key := previous + pathSeparator + clean - pairs[key] = &store.KVPair{Key: key, Value: []byte(value)} -} - -func withPair(key string, value string) func(map[string]string) { - return func(pairs map[string]string) { - if len(key) == 0 { - return - } - pairs[key] = value - } -} - -func withList(key string, values ...string) func(map[string]string) { - return func(pairs map[string]string) { - if len(key) == 0 { - return - } - for i, value := range values { - pairs[key+"/"+strconv.Itoa(i)] = value - } - } -} - -func withErrorPage(name string, backend string, query string, statuses ...string) func(map[string]string) { - return func(pairs map[string]string) { - if len(name) == 0 { - return - } - - withPair(pathFrontendErrorPages+name+pathFrontendErrorPagesBackend, backend)(pairs) - withPair(pathFrontendErrorPages+name+pathFrontendErrorPagesQuery, query)(pairs) - withList(pathFrontendErrorPages+name+pathFrontendErrorPagesStatus, statuses...)(pairs) - } -} - -func withRateLimit(extractorFunc string, opts ...func(map[string]string)) func(map[string]string) { - return func(pairs map[string]string) { - pairs[pathFrontendRateLimitExtractorFunc] = extractorFunc - for _, opt := range opts { - opt(pairs) - } - } -} - -func withLimit(name string, average, burst, period string) func(map[string]string) { - return func(pairs map[string]string) { - pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitAverage] = average - pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitBurst] = burst - pairs[pathFrontendRateLimitRateSet+name+pathFrontendRateLimitPeriod] = period - } -} - -func TestFiller(t *testing.T) { - expected := []*store.KVPair{ - {Key: "traefik/backends/backend.with.dot.too", Value: []byte("")}, - {Key: "traefik/backends/backend.with.dot.too/servers", Value: []byte("")}, - {Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot", Value: []byte("")}, - {Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot.without.url", Value: []byte("")}, - {Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot.without.url/weight", Value: []byte("1")}, - {Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot/url", Value: []byte("http://172.17.0.2:80")}, - {Key: "traefik/backends/backend.with.dot.too/servers/server.with.dot/weight", Value: []byte("1")}, - {Key: "traefik/frontends/frontend.with.dot", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/backend", Value: 
[]byte("backend.with.dot.too")}, - {Key: "traefik/frontends/frontend.with.dot/errors", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/errors/bar", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/errors/bar/backend", Value: []byte("error")}, - {Key: "traefik/frontends/frontend.with.dot/errors/bar/query", Value: []byte("/test2")}, - {Key: "traefik/frontends/frontend.with.dot/errors/bar/status", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/errors/bar/status/0", Value: []byte("400-405")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo/backend", Value: []byte("error")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo/query", Value: []byte("/test1")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo/status", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo/status/0", Value: []byte("500-501")}, - {Key: "traefik/frontends/frontend.with.dot/errors/foo/status/1", Value: []byte("503-599")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/extractorfunc", Value: []byte("client.ip")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/average", Value: []byte("3")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/burst", Value: []byte("6")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/bar/period", Value: []byte("9")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/average", Value: []byte("6")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/burst", Value: []byte("12")}, - {Key: "traefik/frontends/frontend.with.dot/ratelimit/rateset/foo/period", Value: []byte("18")}, - {Key: "traefik/frontends/frontend.with.dot/routes", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/routes/route.with.dot", Value: []byte("")}, - {Key: "traefik/frontends/frontend.with.dot/routes/route.with.dot/rule", Value: []byte("Host:test.localhost")}, - } - - pairs1 := filler("traefik", - frontend("frontend.with.dot", - withPair("backend", "backend.with.dot.too"), - withPair("routes/route.with.dot/rule", "Host:test.localhost"), - withErrorPage("foo", "error", "/test1", "500-501", "503-599"), - withErrorPage("bar", "error", "/test2", "400-405"), - withRateLimit("client.ip", - withLimit("foo", "6", "12", "18"), - withLimit("bar", "3", "6", "9"))), - backend("backend.with.dot.too", - withPair("servers/server.with.dot/url", "http://172.17.0.2:80"), - withPair("servers/server.with.dot/weight", "1"), - withPair("servers/server.with.dot.without.url/weight", "1")), - ) - assert.EqualValues(t, expected, pairs1) - - pairs2 := filler("traefik", - entry("frontends/frontend.with.dot", - withPair("backend", "backend.with.dot.too"), - withPair("routes/route.with.dot/rule", "Host:test.localhost"), - withPair("errors/foo/backend", "error"), - withPair("errors/foo/query", "/test1"), - withList("errors/foo/status", "500-501", "503-599"), - withPair("errors/bar/backend", "error"), - withPair("errors/bar/query", "/test2"), - withList("errors/bar/status", "400-405"), - withPair("ratelimit/extractorfunc", "client.ip"), - 
withPair("ratelimit/rateset/foo/average", "6"), - withPair("ratelimit/rateset/foo/burst", "12"), - withPair("ratelimit/rateset/foo/period", "18"), - withPair("ratelimit/rateset/bar/average", "3"), - withPair("ratelimit/rateset/bar/burst", "6"), - withPair("ratelimit/rateset/bar/period", "9")), - entry("backends/backend.with.dot.too", - withPair("servers/server.with.dot/url", "http://172.17.0.2:80"), - withPair("servers/server.with.dot/weight", "1"), - withPair("servers/server.with.dot.without.url/weight", "1")), - ) - assert.EqualValues(t, expected, pairs2) -} diff --git a/old/provider/kv/keynames.go b/old/provider/kv/keynames.go deleted file mode 100644 index c17e67329..000000000 --- a/old/provider/kv/keynames.go +++ /dev/null @@ -1,132 +0,0 @@ -package kv - -const ( - pathBackends = "/backends/" - pathBackendCircuitBreakerExpression = "/circuitbreaker/expression" - pathBackendResponseForwardingFlushInterval = "/responseforwarding/flushinterval" - pathBackendHealthCheckScheme = "/healthcheck/scheme" - pathBackendHealthCheckPath = "/healthcheck/path" - pathBackendHealthCheckPort = "/healthcheck/port" - pathBackendHealthCheckInterval = "/healthcheck/interval" - pathBackendHealthCheckTimeout = "/healthcheck/timeout" - pathBackendHealthCheckHostname = "/healthcheck/hostname" - pathBackendHealthCheckHeaders = "/healthcheck/headers/" - pathBackendLoadBalancerMethod = "/loadbalancer/method" - pathBackendLoadBalancerStickiness = "/loadbalancer/stickiness" - pathBackendLoadBalancerStickinessCookieName = "/loadbalancer/stickiness/cookiename" - pathBackendMaxConnAmount = "/maxconn/amount" - pathBackendMaxConnExtractorFunc = "/maxconn/extractorfunc" - pathBackendServers = "/servers/" - pathBackendServerURL = "/url" - pathBackendServerWeight = "/weight" - pathBackendBuffering = "/buffering/" - pathBackendBufferingMaxResponseBodyBytes = pathBackendBuffering + "maxresponsebodybytes" - pathBackendBufferingMemResponseBodyBytes = pathBackendBuffering + "memresponsebodybytes" - pathBackendBufferingMaxRequestBodyBytes = pathBackendBuffering + "maxrequestbodybytes" - pathBackendBufferingMemRequestBodyBytes = pathBackendBuffering + "memrequestbodybytes" - pathBackendBufferingRetryExpression = pathBackendBuffering + "retryexpression" - - pathFrontends = "/frontends/" - pathFrontendBackend = "/backend" - pathFrontendPriority = "/priority" - pathFrontendPassHostHeader = "/passhostheader" - pathFrontendPassTLSClientCert = "/passtlsclientcert" - pathFrontendPassTLSClientCertPem = pathFrontendPassTLSClientCert + "/pem" - pathFrontendPassTLSClientCertInfos = pathFrontendPassTLSClientCert + "/infos" - pathFrontendPassTLSClientCertInfosNotAfter = pathFrontendPassTLSClientCertInfos + "/notafter" - pathFrontendPassTLSClientCertInfosNotBefore = pathFrontendPassTLSClientCertInfos + "/notbefore" - pathFrontendPassTLSClientCertInfosSans = pathFrontendPassTLSClientCertInfos + "/sans" - pathFrontendPassTLSClientCertInfosIssuer = pathFrontendPassTLSClientCertInfos + "/issuer" - pathFrontendPassTLSClientCertInfosIssuerCommonName = pathFrontendPassTLSClientCertInfosIssuer + "/commonname" - pathFrontendPassTLSClientCertInfosIssuerCountry = pathFrontendPassTLSClientCertInfosIssuer + "/country" - pathFrontendPassTLSClientCertInfosIssuerDomainComponent = pathFrontendPassTLSClientCertInfosIssuer + "/domaincomponent" - pathFrontendPassTLSClientCertInfosIssuerLocality = pathFrontendPassTLSClientCertInfosIssuer + "/locality" - pathFrontendPassTLSClientCertInfosIssuerOrganization = pathFrontendPassTLSClientCertInfosIssuer + 
"/organization" - pathFrontendPassTLSClientCertInfosIssuerProvince = pathFrontendPassTLSClientCertInfosIssuer + "/province" - pathFrontendPassTLSClientCertInfosIssuerSerialNumber = pathFrontendPassTLSClientCertInfosIssuer + "/serialnumber" - pathFrontendPassTLSClientCertInfosSubject = pathFrontendPassTLSClientCertInfos + "/subject" - pathFrontendPassTLSClientCertInfosSubjectCommonName = pathFrontendPassTLSClientCertInfosSubject + "/commonname" - pathFrontendPassTLSClientCertInfosSubjectCountry = pathFrontendPassTLSClientCertInfosSubject + "/country" - pathFrontendPassTLSClientCertInfosSubjectDomainComponent = pathFrontendPassTLSClientCertInfosSubject + "/domaincomponent" - pathFrontendPassTLSClientCertInfosSubjectLocality = pathFrontendPassTLSClientCertInfosSubject + "/locality" - pathFrontendPassTLSClientCertInfosSubjectOrganization = pathFrontendPassTLSClientCertInfosSubject + "/organization" - pathFrontendPassTLSClientCertInfosSubjectProvince = pathFrontendPassTLSClientCertInfosSubject + "/province" - pathFrontendPassTLSClientCertInfosSubjectSerialNumber = pathFrontendPassTLSClientCertInfosSubject + "/serialnumber" - pathFrontendPassTLSCert = "/passtlscert" - pathFrontendWhiteListSourceRange = "/whitelist/sourcerange" - pathFrontendWhiteListIPStrategy = "/whitelist/ipstrategy" - pathFrontendWhiteListIPStrategyDepth = pathFrontendWhiteListIPStrategy + "/depth" - pathFrontendWhiteListIPStrategyExcludedIPs = pathFrontendWhiteListIPStrategy + "/excludedips" - - pathFrontendAuth = "/auth/" - pathFrontendAuthHeaderField = pathFrontendAuth + "headerfield" - pathFrontendAuthBasic = pathFrontendAuth + "basic/" - pathFrontendAuthBasicRemoveHeader = pathFrontendAuthBasic + "removeheader" - pathFrontendAuthBasicUsers = pathFrontendAuthBasic + "users" - pathFrontendAuthBasicUsersFile = pathFrontendAuthBasic + "usersfile" - pathFrontendAuthDigest = pathFrontendAuth + "digest/" - pathFrontendAuthDigestRemoveHeader = pathFrontendAuthDigest + "removeheader" - pathFrontendAuthDigestUsers = pathFrontendAuthDigest + "users" - pathFrontendAuthDigestUsersFile = pathFrontendAuthDigest + "usersfile" - pathFrontendAuthForward = pathFrontendAuth + "forward/" - pathFrontendAuthForwardAddress = pathFrontendAuthForward + "address" - pathFrontendAuthForwardAuthResponseHeaders = pathFrontendAuthForward + ".authresponseheaders" - pathFrontendAuthForwardTLS = pathFrontendAuthForward + "tls/" - pathFrontendAuthForwardTLSCa = pathFrontendAuthForwardTLS + "ca" - pathFrontendAuthForwardTLSCaOptional = pathFrontendAuthForwardTLS + "caoptional" - pathFrontendAuthForwardTLSCert = pathFrontendAuthForwardTLS + "cert" - pathFrontendAuthForwardTLSInsecureSkipVerify = pathFrontendAuthForwardTLS + "insecureskipverify" - pathFrontendAuthForwardTLSKey = pathFrontendAuthForwardTLS + "key" - pathFrontendAuthForwardTrustForwardHeader = pathFrontendAuthForward + "trustforwardheader" - - pathFrontendEntryPoints = "/entrypoints" - pathFrontendRedirectEntryPoint = "/redirect/entrypoint" - pathFrontendRedirectRegex = "/redirect/regex" - pathFrontendRedirectReplacement = "/redirect/replacement" - pathFrontendRedirectPermanent = "/redirect/permanent" - pathFrontendErrorPages = "/errors/" - pathFrontendErrorPagesBackend = "/backend" - pathFrontendErrorPagesQuery = "/query" - pathFrontendErrorPagesStatus = "/status" - pathFrontendRateLimit = "/ratelimit/" - pathFrontendRateLimitRateSet = pathFrontendRateLimit + "rateset/" - pathFrontendRateLimitExtractorFunc = pathFrontendRateLimit + "extractorfunc" - pathFrontendRateLimitPeriod = "/period" - 
pathFrontendRateLimitAverage = "/average" - pathFrontendRateLimitBurst = "/burst" - - pathFrontendCustomRequestHeaders = "/headers/customrequestheaders/" - pathFrontendCustomResponseHeaders = "/headers/customresponseheaders/" - pathFrontendAllowedHosts = "/headers/allowedhosts" - pathFrontendHostsProxyHeaders = "/headers/hostsproxyheaders" - pathFrontendSSLForceHost = "/headers/sslforcehost" - pathFrontendSSLRedirect = "/headers/sslredirect" - pathFrontendSSLTemporaryRedirect = "/headers/ssltemporaryredirect" - pathFrontendSSLHost = "/headers/sslhost" - pathFrontendSSLProxyHeaders = "/headers/sslproxyheaders/" - pathFrontendSTSSeconds = "/headers/stsseconds" - pathFrontendSTSIncludeSubdomains = "/headers/stsincludesubdomains" - pathFrontendSTSPreload = "/headers/stspreload" - pathFrontendForceSTSHeader = "/headers/forcestsheader" - pathFrontendFrameDeny = "/headers/framedeny" - pathFrontendCustomFrameOptionsValue = "/headers/customframeoptionsvalue" - pathFrontendContentTypeNosniff = "/headers/contenttypenosniff" - pathFrontendBrowserXSSFilter = "/headers/browserxssfilter" - pathFrontendCustomBrowserXSSValue = "/headers/custombrowserxssvalue" - pathFrontendContentSecurityPolicy = "/headers/contentsecuritypolicy" - pathFrontendPublicKey = "/headers/publickey" - pathFrontendReferrerPolicy = "/headers/referrerpolicy" - pathFrontendIsDevelopment = "/headers/isdevelopment" - - pathFrontendRoutes = "/routes/" - pathFrontendRule = "/rule" - - pathTLS = "/tls/" - pathTLSEntryPoints = "/entrypoints" - pathTLSCertFile = "/certificate/certfile" - pathTLSKeyFile = "/certificate/keyfile" - - pathTags = "/tags" - pathAlias = "/alias" - pathSeparator = "/" -) diff --git a/old/provider/kv/kv.go b/old/provider/kv/kv.go deleted file mode 100644 index 451d7e4ea..000000000 --- a/old/provider/kv/kv.go +++ /dev/null @@ -1,128 +0,0 @@ -package kv - -import ( - "errors" - "fmt" - "strings" - "time" - - "github.com/abronan/valkeyrie" - "github.com/abronan/valkeyrie/store" - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" -) - -// Provider holds common configurations of key-value providers. 
-type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - Endpoint string `description:"Comma separated server endpoints"` - Prefix string `description:"Prefix used for KV store" export:"true"` - TLS *types.ClientTLS `description:"Enable TLS support" export:"true"` - Username string `description:"KV Username"` - Password string `description:"KV Password"` - storeType store.Backend - kvClient store.Store -} - -// CreateStore create the K/V store -func (p *Provider) CreateStore() (store.Store, error) { - storeConfig := &store.Config{ - ConnectionTimeout: 30 * time.Second, - Bucket: "traefik", - Username: p.Username, - Password: p.Password, - } - - if p.TLS != nil { - var err error - storeConfig.TLS, err = p.TLS.CreateTLSConfig() - if err != nil { - return nil, err - } - } - return valkeyrie.NewStore( - p.storeType, - strings.Split(p.Endpoint, ","), - storeConfig, - ) -} - -// SetStoreType storeType setter -func (p *Provider) SetStoreType(storeType store.Backend) { - p.storeType = storeType -} - -// SetKVClient kvClient setter -func (p *Provider) SetKVClient(kvClient store.Store) { - p.kvClient = kvClient -} - -func (p *Provider) watchKv(configurationChan chan<- types.ConfigMessage, prefix string, stop chan bool) error { - operation := func() error { - events, err := p.kvClient.WatchTree(p.Prefix, make(chan struct{}), nil) - if err != nil { - return fmt.Errorf("failed to KV WatchTree: %v", err) - } - for { - select { - case <-stop: - return nil - case _, ok := <-events: - if !ok { - return errors.New("watchtree channel closed") - } - configuration := p.buildConfiguration() - if configuration != nil { - configurationChan <- types.ConfigMessage{ - ProviderName: string(p.storeType), - Configuration: configuration, - } - } - } - } - } - - notify := func(err error, time time.Duration) { - log.Errorf("KV connection error: %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - return fmt.Errorf("cannot connect to KV server: %v", err) - } - return nil -} - -// Provide provides the configuration to traefik via the configuration channel -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - operation := func() error { - if _, err := p.kvClient.Exists(p.Prefix+"/qmslkjdfmqlskdjfmqlksjazçueznbvbwzlkajzebvkwjdcqmlsfj", nil); err != nil { - return fmt.Errorf("failed to test KV store connection: %v", err) - } - if p.Watch { - pool.Go(func(stop chan bool) { - err := p.watchKv(configurationChan, p.Prefix, stop) - if err != nil { - log.Errorf("Cannot watch KV store: %v", err) - } - }) - } - configuration := p.buildConfiguration() - configurationChan <- types.ConfigMessage{ - ProviderName: string(p.storeType), - Configuration: configuration, - } - return nil - } - notify := func(err error, time time.Duration) { - log.Errorf("KV connection error: %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - return fmt.Errorf("cannot connect to KV server: %v", err) - } - return nil -} diff --git a/old/provider/kv/kv_config.go b/old/provider/kv/kv_config.go deleted file mode 100644 index a916e4532..000000000 --- a/old/provider/kv/kv_config.go +++ /dev/null @@ -1,730 +0,0 @@ -package kv - -import ( - "fmt" - "math" - "net/http" - "sort" - "strconv" - "strings" - "text/template" - - 
"github.com/BurntSushi/ty/fun" - "github.com/abronan/valkeyrie/store" - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/tls" -) - -func (p *Provider) buildConfiguration() *types.Configuration { - templateObjects := struct { - Prefix string - }{ - // Allow `/traefik/alias` to supersede `p.Prefix` - Prefix: strings.TrimSuffix(p.get(p.Prefix, p.Prefix+pathAlias), pathSeparator), - } - - var KvFuncMap = template.FuncMap{ - "List": p.list, - "ListServers": p.listServers, - "Get": p.get, - "GetBool": p.getBool, - "GetInt": p.getInt, - "GetInt64": p.getInt64, - "GetList": p.getList, - "SplitGet": p.splitGet, - "Last": p.last, - "Has": p.has, - - "getTLSSection": p.getTLSSection, - - // Frontend functions - "getBackendName": p.getFuncString(pathFrontendBackend, ""), - "getPriority": p.getFuncInt(pathFrontendPriority, label.DefaultFrontendPriority), - "getPassHostHeader": p.getFuncBool(pathFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": p.getFuncBool(pathFrontendPassTLSCert, label.DefaultPassTLSCert), - "getPassTLSClientCert": p.getTLSClientCert, - "getEntryPoints": p.getFuncList(pathFrontendEntryPoints), - "getAuth": p.getAuth, - "getRoutes": p.getRoutes, - "getRedirect": p.getRedirect, - "getErrorPages": p.getErrorPages, - "getRateLimit": p.getRateLimit, - "getHeaders": p.getHeaders, - "getWhiteList": p.getWhiteList, - - // Backend functions - "getServers": p.getServers, - "getCircuitBreaker": p.getCircuitBreaker, - "getResponseForwarding": p.getResponseForwarding, - "getLoadBalancer": p.getLoadBalancer, - "getMaxConn": p.getMaxConn, - "getHealthCheck": p.getHealthCheck, - "getBuffering": p.getBuffering, - } - - configuration, err := p.GetConfiguration("templates/kv.tmpl", KvFuncMap, templateObjects) - if err != nil { - log.Error(err) - } - - for key, frontend := range configuration.Frontends { - if _, ok := configuration.Backends[frontend.Backend]; !ok { - delete(configuration.Frontends, key) - } - } - - return configuration -} - -func (p *Provider) getWhiteList(rootPath string) *types.WhiteList { - ranges := p.getList(rootPath, pathFrontendWhiteListSourceRange) - - if len(ranges) == 0 { - return nil - } - - return &types.WhiteList{ - SourceRange: ranges, - IPStrategy: p.getIPStrategy(rootPath), - } -} - -func (p *Provider) getIPStrategy(rootPath string) *types.IPStrategy { - ipStrategy := p.getBool(false, rootPath, pathFrontendWhiteListIPStrategy) - depth := p.getInt(0, rootPath, pathFrontendWhiteListIPStrategyDepth) - excludedIPs := p.getList(rootPath, pathFrontendWhiteListIPStrategyExcludedIPs) - - if depth == 0 && len(excludedIPs) == 0 && !ipStrategy { - return nil - } - - return &types.IPStrategy{ - Depth: depth, - ExcludedIPs: excludedIPs, - } -} - -func (p *Provider) getRedirect(rootPath string) *types.Redirect { - permanent := p.getBool(false, rootPath, pathFrontendRedirectPermanent) - - if p.has(rootPath, pathFrontendRedirectEntryPoint) { - return &types.Redirect{ - EntryPoint: p.get("", rootPath, pathFrontendRedirectEntryPoint), - Permanent: permanent, - } - } - - if p.has(rootPath, pathFrontendRedirectRegex) && p.has(rootPath, pathFrontendRedirectReplacement) { - return &types.Redirect{ - Regex: p.get("", rootPath, pathFrontendRedirectRegex), - Replacement: p.get("", rootPath, pathFrontendRedirectReplacement), - Permanent: permanent, - } - } - - return nil -} - -func (p *Provider) 
getErrorPages(rootPath string) map[string]*types.ErrorPage { - var errorPages map[string]*types.ErrorPage - - pathErrors := p.list(rootPath, pathFrontendErrorPages) - - for _, pathPage := range pathErrors { - if errorPages == nil { - errorPages = make(map[string]*types.ErrorPage) - } - - pageName := p.last(pathPage) - - errorPages[pageName] = &types.ErrorPage{ - Backend: p.get("", pathPage, pathFrontendErrorPagesBackend), - Query: p.get("", pathPage, pathFrontendErrorPagesQuery), - Status: p.getList(pathPage, pathFrontendErrorPagesStatus), - } - } - - return errorPages -} - -func (p *Provider) getRateLimit(rootPath string) *types.RateLimit { - extractorFunc := p.get("", rootPath, pathFrontendRateLimitExtractorFunc) - if len(extractorFunc) == 0 { - return nil - } - - var limits map[string]*types.Rate - - pathRateSet := p.list(rootPath, pathFrontendRateLimitRateSet) - for _, pathLimits := range pathRateSet { - if limits == nil { - limits = make(map[string]*types.Rate) - } - - rawPeriod := p.get("", pathLimits+pathFrontendRateLimitPeriod) - - var period parse.Duration - err := period.Set(rawPeriod) - if err != nil { - log.Errorf("Invalid %q value: %q", pathLimits+pathFrontendRateLimitPeriod, rawPeriod) - continue - } - - limitName := p.last(pathLimits) - - limits[limitName] = &types.Rate{ - Average: p.getInt64(0, pathLimits+pathFrontendRateLimitAverage), - Burst: p.getInt64(0, pathLimits+pathFrontendRateLimitBurst), - Period: period, - } - } - - return &types.RateLimit{ - ExtractorFunc: extractorFunc, - RateSet: limits, - } -} - -func (p *Provider) getHeaders(rootPath string) *types.Headers { - headers := &types.Headers{ - CustomRequestHeaders: p.getMap(rootPath, pathFrontendCustomRequestHeaders), - CustomResponseHeaders: p.getMap(rootPath, pathFrontendCustomResponseHeaders), - SSLProxyHeaders: p.getMap(rootPath, pathFrontendSSLProxyHeaders), - AllowedHosts: p.getList("", rootPath, pathFrontendAllowedHosts), - HostsProxyHeaders: p.getList(rootPath, pathFrontendHostsProxyHeaders), - SSLForceHost: p.getBool(false, rootPath, pathFrontendSSLForceHost), - SSLRedirect: p.getBool(false, rootPath, pathFrontendSSLRedirect), - SSLTemporaryRedirect: p.getBool(false, rootPath, pathFrontendSSLTemporaryRedirect), - SSLHost: p.get("", rootPath, pathFrontendSSLHost), - STSSeconds: p.getInt64(0, rootPath, pathFrontendSTSSeconds), - STSIncludeSubdomains: p.getBool(false, rootPath, pathFrontendSTSIncludeSubdomains), - STSPreload: p.getBool(false, rootPath, pathFrontendSTSPreload), - ForceSTSHeader: p.getBool(false, rootPath, pathFrontendForceSTSHeader), - FrameDeny: p.getBool(false, rootPath, pathFrontendFrameDeny), - CustomFrameOptionsValue: p.get("", rootPath, pathFrontendCustomFrameOptionsValue), - ContentTypeNosniff: p.getBool(false, rootPath, pathFrontendContentTypeNosniff), - BrowserXSSFilter: p.getBool(false, rootPath, pathFrontendBrowserXSSFilter), - CustomBrowserXSSValue: p.get("", rootPath, pathFrontendCustomBrowserXSSValue), - ContentSecurityPolicy: p.get("", rootPath, pathFrontendContentSecurityPolicy), - PublicKey: p.get("", rootPath, pathFrontendPublicKey), - ReferrerPolicy: p.get("", rootPath, pathFrontendReferrerPolicy), - IsDevelopment: p.getBool(false, rootPath, pathFrontendIsDevelopment), - } - - if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() { - return nil - } - - return headers -} - -func (p *Provider) getLoadBalancer(rootPath string) *types.LoadBalancer { - lb := &types.LoadBalancer{ - Method: p.get(label.DefaultBackendLoadBalancerMethod, rootPath, 
pathBackendLoadBalancerMethod), - } - - if p.getBool(false, rootPath, pathBackendLoadBalancerStickiness) { - lb.Stickiness = &types.Stickiness{ - CookieName: p.get("", rootPath, pathBackendLoadBalancerStickinessCookieName), - } - } - - return lb -} - -func (p *Provider) getResponseForwarding(rootPath string) *types.ResponseForwarding { - if !p.has(rootPath, pathBackendResponseForwardingFlushInterval) { - return nil - } - value := p.get("", rootPath, pathBackendResponseForwardingFlushInterval) - if len(value) == 0 { - return nil - } - - return &types.ResponseForwarding{ - FlushInterval: value, - } -} - -func (p *Provider) getCircuitBreaker(rootPath string) *types.CircuitBreaker { - if !p.has(rootPath, pathBackendCircuitBreakerExpression) { - return nil - } - - circuitBreaker := p.get(label.DefaultCircuitBreakerExpression, rootPath, pathBackendCircuitBreakerExpression) - if len(circuitBreaker) == 0 { - return nil - } - - return &types.CircuitBreaker{Expression: circuitBreaker} -} - -func (p *Provider) getMaxConn(rootPath string) *types.MaxConn { - amount := p.getInt64(math.MinInt64, rootPath, pathBackendMaxConnAmount) - extractorFunc := p.get(label.DefaultBackendMaxconnExtractorFunc, rootPath, pathBackendMaxConnExtractorFunc) - - if amount == math.MinInt64 || len(extractorFunc) == 0 { - return nil - } - - return &types.MaxConn{ - Amount: amount, - ExtractorFunc: extractorFunc, - } -} - -func (p *Provider) getHealthCheck(rootPath string) *types.HealthCheck { - path := p.get("", rootPath, pathBackendHealthCheckPath) - - if len(path) == 0 { - return nil - } - - scheme := p.get("", rootPath, pathBackendHealthCheckScheme) - port := p.getInt(label.DefaultBackendHealthCheckPort, rootPath, pathBackendHealthCheckPort) - interval := p.get("30s", rootPath, pathBackendHealthCheckInterval) - timeout := p.get("5s", rootPath, pathBackendHealthCheckTimeout) - hostname := p.get("", rootPath, pathBackendHealthCheckHostname) - headers := p.getMap(rootPath, pathBackendHealthCheckHeaders) - - return &types.HealthCheck{ - Scheme: scheme, - Path: path, - Port: port, - Interval: interval, - Timeout: timeout, - Hostname: hostname, - Headers: headers, - } -} - -func (p *Provider) getBuffering(rootPath string) *types.Buffering { - pathsBuffering := p.list(rootPath, pathBackendBuffering) - - var buffering *types.Buffering - if len(pathsBuffering) > 0 { - if buffering == nil { - buffering = &types.Buffering{} - } - - buffering.MaxRequestBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMaxRequestBodyBytes) - buffering.MaxResponseBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMaxResponseBodyBytes) - buffering.MemRequestBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMemRequestBodyBytes) - buffering.MemResponseBodyBytes = p.getInt64(0, rootPath, pathBackendBufferingMemResponseBodyBytes) - buffering.RetryExpression = p.get("", rootPath, pathBackendBufferingRetryExpression) - } - - return buffering -} - -func (p *Provider) getTLSSection(prefix string) []*tls.Configuration { - var tlsSection []*tls.Configuration - - for _, tlsConfPath := range p.list(prefix, pathTLS) { - certFile := p.get("", tlsConfPath, pathTLSCertFile) - keyFile := p.get("", tlsConfPath, pathTLSKeyFile) - - if len(certFile) == 0 && len(keyFile) == 0 { - log.Warnf("Invalid TLS configuration (no cert and no key): %s", tlsConfPath) - continue - } - - entryPoints := p.getList(tlsConfPath, pathTLSEntryPoints) - if len(entryPoints) == 0 { - log.Warnf("Invalid TLS configuration (no entry points): %s", tlsConfPath) - continue - } - - 
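// Note (illustrative aside, not part of the removed file): most getters in the removed
// kv_config.go follow the same shape: probe the store for a marker key and return nil
// when it is absent or empty, so optional sections (circuit breaker, max-conn,
// buffering, ...) stay unset instead of appearing with zero values. A minimal
// standalone sketch of that shape follows; the map-based lookup and the
// circuitBreakerFrom name are hypothetical, only the presence-check-then-nil
// structure mirrors the deleted code.
package main

import "fmt"

type CircuitBreaker struct{ Expression string }

// circuitBreakerFrom returns nil when the expression key is missing or empty.
func circuitBreakerFrom(kv map[string]string, rootPath string) *CircuitBreaker {
	expr, ok := kv[rootPath+"/circuitbreaker/expression"]
	if !ok || expr == "" {
		return nil
	}
	return &CircuitBreaker{Expression: expr}
}

func main() {
	kv := map[string]string{"traefik/backends/b1/circuitbreaker/expression": "NetworkErrorRatio() > 1"}
	fmt.Println(circuitBreakerFrom(kv, "traefik/backends/b1")) // &{NetworkErrorRatio() > 1}
	fmt.Println(circuitBreakerFrom(kv, "traefik/backends/b2")) // <nil>
}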
tlsConf := &tls.Configuration{ - Stores: entryPoints, - Certificate: &tls.Certificate{ - CertFile: tls.FileOrContent(certFile), - KeyFile: tls.FileOrContent(keyFile), - }, - } - - tlsSection = append(tlsSection, tlsConf) - } - - return tlsSection -} - -// getTLSClientCert create TLS client header configuration from labels -func (p *Provider) getTLSClientCert(rootPath string) *types.TLSClientHeaders { - if !p.hasPrefix(rootPath, pathFrontendPassTLSClientCert) { - return nil - } - - tlsClientHeaders := &types.TLSClientHeaders{ - PEM: p.getBool(false, rootPath, pathFrontendPassTLSClientCertPem), - } - - if p.hasPrefix(rootPath, pathFrontendPassTLSClientCertInfos) { - infos := &types.TLSClientCertificateInfos{ - NotAfter: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosNotAfter), - NotBefore: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosNotBefore), - Sans: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSans), - } - - if p.hasPrefix(rootPath, pathFrontendPassTLSClientCertInfosSubject) { - subject := &types.TLSCLientCertificateDNInfos{ - CommonName: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectCommonName), - Country: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectCountry), - DomainComponent: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectDomainComponent), - Locality: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectLocality), - Organization: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectOrganization), - Province: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectProvince), - SerialNumber: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosSubjectSerialNumber), - } - infos.Subject = subject - } - - if p.hasPrefix(rootPath, pathFrontendPassTLSClientCertInfosIssuer) { - issuer := &types.TLSCLientCertificateDNInfos{ - CommonName: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerCommonName), - Country: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerCountry), - DomainComponent: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerDomainComponent), - Locality: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerLocality), - Organization: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerOrganization), - Province: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerProvince), - SerialNumber: p.getBool(false, rootPath, pathFrontendPassTLSClientCertInfosIssuerSerialNumber), - } - infos.Issuer = issuer - } - - tlsClientHeaders.Infos = infos - } - return tlsClientHeaders -} - -// GetAuth Create auth from path -func (p *Provider) getAuth(rootPath string) *types.Auth { - if p.hasPrefix(rootPath, pathFrontendAuth) { - auth := &types.Auth{ - HeaderField: p.get("", rootPath, pathFrontendAuthHeaderField), - } - - if p.hasPrefix(rootPath, pathFrontendAuthBasic) { - auth.Basic = p.getAuthBasic(rootPath) - } else if p.hasPrefix(rootPath, pathFrontendAuthDigest) { - auth.Digest = p.getAuthDigest(rootPath) - } else if p.hasPrefix(rootPath, pathFrontendAuthForward) { - auth.Forward = p.getAuthForward(rootPath) - } - - return auth - } - return nil -} - -// getAuthBasic Create Basic Auth from path -func (p *Provider) getAuthBasic(rootPath string) *types.Basic { - return &types.Basic{ - UsersFile: p.get("", rootPath, pathFrontendAuthBasicUsersFile), - RemoveHeader: p.getBool(false, rootPath, pathFrontendAuthBasicRemoveHeader), - Users: 
p.getList(rootPath, pathFrontendAuthBasicUsers), - } -} - -// getAuthDigest Create Digest Auth from path -func (p *Provider) getAuthDigest(rootPath string) *types.Digest { - return &types.Digest{ - Users: p.getList(rootPath, pathFrontendAuthDigestUsers), - UsersFile: p.get("", rootPath, pathFrontendAuthDigestUsersFile), - RemoveHeader: p.getBool(false, rootPath, pathFrontendAuthDigestRemoveHeader), - } -} - -// getAuthForward Create Forward Auth from path -func (p *Provider) getAuthForward(rootPath string) *types.Forward { - forwardAuth := &types.Forward{ - Address: p.get("", rootPath, pathFrontendAuthForwardAddress), - TrustForwardHeader: p.getBool(false, rootPath, pathFrontendAuthForwardTrustForwardHeader), - AuthResponseHeaders: p.getList(rootPath, pathFrontendAuthForwardAuthResponseHeaders), - } - - // TLS configuration - if len(p.getList(rootPath, pathFrontendAuthForwardTLS)) > 0 { - forwardAuth.TLS = &types.ClientTLS{ - CA: p.get("", rootPath, pathFrontendAuthForwardTLSCa), - CAOptional: p.getBool(false, rootPath, pathFrontendAuthForwardTLSCaOptional), - Cert: p.get("", rootPath, pathFrontendAuthForwardTLSCert), - InsecureSkipVerify: p.getBool(false, rootPath, pathFrontendAuthForwardTLSInsecureSkipVerify), - Key: p.get("", rootPath, pathFrontendAuthForwardTLSKey), - } - } - - return forwardAuth -} - -func (p *Provider) getRoutes(rootPath string) map[string]types.Route { - var routes map[string]types.Route - - rts := p.list(rootPath, pathFrontendRoutes) - for _, rt := range rts { - - rule := p.get("", rt, pathFrontendRule) - if len(rule) == 0 { - continue - } - - if routes == nil { - routes = make(map[string]types.Route) - } - - routeName := p.last(rt) - routes[routeName] = types.Route{ - Rule: rule, - } - } - - return routes -} - -func (p *Provider) getServers(rootPath string) map[string]types.Server { - var servers map[string]types.Server - - serverKeys := p.listServers(rootPath) - for _, serverKey := range serverKeys { - serverURL := p.get("", serverKey, pathBackendServerURL) - if len(serverURL) == 0 { - continue - } - - if servers == nil { - servers = make(map[string]types.Server) - } - - serverName := p.last(serverKey) - servers[serverName] = types.Server{ - URL: serverURL, - Weight: p.getInt(label.DefaultWeight, serverKey, pathBackendServerWeight), - } - } - - return servers -} - -func (p *Provider) listServers(backend string) []string { - serverNames := p.list(backend, pathBackendServers) - return fun.Filter(p.serverFilter, serverNames).([]string) -} - -func (p *Provider) serverFilter(serverName string) bool { - key := fmt.Sprint(serverName, pathBackendServerURL) - if _, err := p.kvClient.Get(key, nil); err != nil { - if err != store.ErrKeyNotFound { - log.Errorf("Failed to retrieve value for key %s: %s", key, err) - } - return false - } - return p.checkConstraints(serverName, pathTags) -} - -func (p *Provider) checkConstraints(keys ...string) bool { - joinedKeys := strings.Join(keys, "") - keyPair, err := p.kvClient.Get(joinedKeys, nil) - - value := "" - if err == nil && keyPair != nil && keyPair.Value != nil { - value = string(keyPair.Value) - } - - constraintTags := label.SplitAndTrimString(value, ",") - ok, failingConstraint := p.MatchConstraints(constraintTags) - if !ok { - if failingConstraint != nil { - log.Debugf("Constraint %v not matching with following tags: %v", failingConstraint.String(), value) - } - return false - } - return true -} - -func (p *Provider) getFuncString(key string, defaultValue string) func(rootPath string) string { - return func(rootPath string) 
string { - return p.get(defaultValue, rootPath, key) - } -} - -func (p *Provider) getFuncBool(key string, defaultValue bool) func(rootPath string) bool { - return func(rootPath string) bool { - return p.getBool(defaultValue, rootPath, key) - } -} - -func (p *Provider) getFuncInt(key string, defaultValue int) func(rootPath string) int { - return func(rootPath string) int { - return p.getInt(defaultValue, rootPath, key) - } -} - -func (p *Provider) getFuncList(key string) func(rootPath string) []string { - return func(rootPath string) []string { - return p.getList(rootPath, key) - } -} - -func (p *Provider) get(defaultValue string, keyParts ...string) string { - key := strings.Join(keyParts, "") - - if p.storeType == store.ETCD { - key = strings.TrimPrefix(key, pathSeparator) - } - - keyPair, err := p.kvClient.Get(key, nil) - if err != nil { - log.Debugf("Cannot get key %s %s, setting default %s", key, err, defaultValue) - return defaultValue - } else if keyPair == nil { - log.Debugf("Cannot get key %s, setting default %s", key, defaultValue) - return defaultValue - } - - return string(keyPair.Value) -} - -func (p *Provider) getBool(defaultValue bool, keyParts ...string) bool { - rawValue := p.get(strconv.FormatBool(defaultValue), keyParts...) - - if len(rawValue) == 0 { - return defaultValue - } - - value, err := strconv.ParseBool(rawValue) - if err != nil { - log.Errorf("Invalid value for %v: %s", keyParts, rawValue) - return defaultValue - } - return value -} - -func (p *Provider) has(keyParts ...string) bool { - value := p.get("", keyParts...) - return len(value) > 0 -} - -func (p *Provider) hasPrefix(keyParts ...string) bool { - baseKey := strings.Join(keyParts, "") - if !strings.HasSuffix(baseKey, "/") { - baseKey += "/" - } - - listKeys, err := p.kvClient.List(baseKey, nil) - if err != nil { - log.Debugf("Cannot list keys under %q: %v", baseKey, err) - return false - } - - return len(listKeys) > 0 -} - -func (p *Provider) getInt(defaultValue int, keyParts ...string) int { - rawValue := p.get("", keyParts...) - - if len(rawValue) == 0 { - return defaultValue - } - - value, err := strconv.Atoi(rawValue) - if err != nil { - log.Errorf("Invalid value for %v: %s", keyParts, rawValue) - return defaultValue - } - return value -} - -func (p *Provider) getInt64(defaultValue int64, keyParts ...string) int64 { - rawValue := p.get("", keyParts...) - - if len(rawValue) == 0 { - return defaultValue - } - - value, err := strconv.ParseInt(rawValue, 10, 64) - if err != nil { - log.Errorf("Invalid value for %v: %s", keyParts, rawValue) - return defaultValue - } - return value -} - -func (p *Provider) list(keyParts ...string) []string { - rootKey := strings.Join(keyParts, "") - - keysPairs, err := p.kvClient.List(rootKey, nil) - if err != nil { - log.Debugf("Cannot list keys under %q: %v", rootKey, err) - return nil - } - - directoryKeys := make(map[string]string) - for _, key := range keysPairs { - directory := strings.Split(strings.TrimPrefix(key.Key, rootKey), pathSeparator)[0] - directoryKeys[directory] = rootKey + directory - } - - keys := fun.Values(directoryKeys).([]string) - sort.Strings(keys) - return keys -} - -func (p *Provider) getList(keyParts ...string) []string { - values := p.splitGet(keyParts...) - if len(values) > 0 { - return values - } - - return p.getSlice(keyParts...) -} - -// get sub keys. 
ex: foo/0, foo/1, foo/2 -func (p *Provider) getSlice(keyParts ...string) []string { - baseKey := strings.Join(keyParts, "") - if !strings.HasSuffix(baseKey, "/") { - baseKey += "/" - } - - listKeys := p.list(baseKey) - - var values []string - for _, entryKey := range listKeys { - val := p.get("", entryKey) - if len(val) > 0 { - values = append(values, val) - } - } - return values -} - -func (p *Provider) splitGet(keyParts ...string) []string { - value := p.get("", keyParts...) - - if len(value) == 0 { - return nil - } - return label.SplitAndTrimString(value, ",") -} - -func (p *Provider) last(key string) string { - index := strings.LastIndex(key, pathSeparator) - return key[index+1:] -} - -func (p *Provider) getMap(keyParts ...string) map[string]string { - var mapData map[string]string - - list := p.list(keyParts...) - for _, name := range list { - if mapData == nil { - mapData = make(map[string]string) - } - - mapData[http.CanonicalHeaderKey(p.last(name))] = p.get("", name) - } - - return mapData -} diff --git a/old/provider/kv/kv_config_test.go b/old/provider/kv/kv_config_test.go deleted file mode 100644 index 554847182..000000000 --- a/old/provider/kv/kv_config_test.go +++ /dev/null @@ -1,2410 +0,0 @@ -package kv - -import ( - "sort" - "strconv" - "testing" - "time" - - "github.com/abronan/valkeyrie/store" - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/tls" - "github.com/stretchr/testify/assert" -) - -func aKVPair(key string, value string) *store.KVPair { - return &store.KVPair{Key: key, Value: []byte(value)} -} - -func TestProviderBuildConfiguration(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - expected *types.Configuration - }{ - { - desc: "name with dot", - kvPairs: filler("traefik", - frontend("frontend.with.dot", - withPair("backend", "backend.with.dot.too"), - withPair("routes/route.with.dot/rule", "Host:test.localhost")), - backend("backend.with.dot.too", - withPair("servers/server.with.dot/url", "http://172.17.0.2:80"), - withPair("servers/server.with.dot/weight", strconv.Itoa(label.DefaultWeight)), - withPair("servers/server.with.dot.without.url/weight", strconv.Itoa(label.DefaultWeight))), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend.with.dot.too": { - LoadBalancer: &types.LoadBalancer{Method: label.DefaultBackendLoadBalancerMethod}, - Servers: map[string]types.Server{ - "server.with.dot": { - URL: "http://172.17.0.2:80", - Weight: label.DefaultWeight, - }, - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend.with.dot": { - Backend: "backend.with.dot.too", - PassHostHeader: true, - EntryPoints: []string{}, - Routes: map[string]types.Route{ - "route.with.dot": { - Rule: "Host:test.localhost", - }, - }, - }, - }, - }, - }, - { - desc: "basic auth Users", - kvPairs: filler("traefik", - frontend("frontend", - withPair(pathFrontendBackend, "backend"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - withPair(pathFrontendAuthBasicRemoveHeader, "true"), - withList(pathFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - ), - backend("backend"), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend": { - LoadBalancer: &types.LoadBalancer{ - Method: "wrr", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend": { - 
Backend: "backend", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - }, - }, - }, - }, - { - desc: "basic auth UsersFile", - kvPairs: filler("traefik", - frontend("frontend", - withPair(pathFrontendBackend, "backend"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - withPair(pathFrontendAuthBasicUsersFile, ".htpasswd"), - ), - backend("backend"), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend": { - LoadBalancer: &types.LoadBalancer{ - Method: "wrr", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend": { - Backend: "backend", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - UsersFile: ".htpasswd", - }, - }, - }, - }, - }, - }, - { - desc: "digest auth", - kvPairs: filler("traefik", - frontend("frontend", - withPair(pathFrontendBackend, "backend"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - withPair(pathFrontendAuthDigestRemoveHeader, "true"), - withList(pathFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withPair(pathFrontendAuthDigestUsersFile, ".htpasswd"), - ), - backend("backend"), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend": { - LoadBalancer: &types.LoadBalancer{ - Method: "wrr", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend": { - Backend: "backend", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - }, - }, - }, - { - desc: "forward auth", - kvPairs: filler("traefik", - frontend("frontend", - withPair(pathFrontendBackend, "backend"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - withPair(pathFrontendAuthForwardAddress, "auth.server"), - withPair(pathFrontendAuthForwardTrustForwardHeader, "true"), - withPair(pathFrontendAuthForwardTLSCa, "ca.crt"), - withPair(pathFrontendAuthForwardTLSCaOptional, "true"), - withPair(pathFrontendAuthForwardTLSCert, "server.crt"), - withPair(pathFrontendAuthForwardTLSKey, "server.key"), - withPair(pathFrontendAuthForwardTLSInsecureSkipVerify, "true"), - withPair(pathFrontendAuthForwardAuthResponseHeaders, "X-Auth-User,X-Auth-Token"), - ), - backend("backend"), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend": { - LoadBalancer: &types.LoadBalancer{ - Method: "wrr", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend": { - Backend: "backend", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - }, - }, - }, - }, - { - desc: "forward auth", - kvPairs: filler("traefik", - frontend("frontend", - withPair(pathFrontendBackend, "backend"), - 
withList(pathFrontendWhiteListSourceRange, "1.1.1.1/24", "1234:abcd::42/32"), - withPair(pathFrontendWhiteListIPStrategy, "true"), - ), - backend("backend"), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend": { - LoadBalancer: &types.LoadBalancer{ - Method: "wrr", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend": { - Backend: "backend", - PassHostHeader: true, - EntryPoints: []string{}, - WhiteList: &types.WhiteList{ - SourceRange: []string{"1.1.1.1/24", "1234:abcd::42/32"}, - IPStrategy: &types.IPStrategy{ - ExcludedIPs: []string{}, - }, - }, - }, - }, - }, - }, - { - desc: "all parameters", - kvPairs: filler("traefik", - backend("backend1", - withPair(pathBackendCircuitBreakerExpression, label.DefaultCircuitBreakerExpression), - withPair(pathBackendLoadBalancerMethod, "drr"), - withPair(pathBackendLoadBalancerStickiness, "true"), - withPair(pathBackendLoadBalancerStickinessCookieName, "tomate"), - withPair(pathBackendHealthCheckScheme, "http"), - withPair(pathBackendHealthCheckPath, "/health"), - withPair(pathBackendHealthCheckPort, "80"), - withPair(pathBackendHealthCheckInterval, "30s"), - withPair(pathBackendHealthCheckTimeout, "5s"), - withPair(pathBackendHealthCheckHostname, "foo.com"), - withPair(pathBackendHealthCheckHeaders+"Foo", "bar"), - withPair(pathBackendHealthCheckHeaders+"Bar", "foo"), - withPair(pathBackendMaxConnAmount, "5"), - withPair(pathBackendMaxConnExtractorFunc, "client.ip"), - withPair(pathBackendBufferingMaxResponseBodyBytes, "10485760"), - withPair(pathBackendBufferingMemResponseBodyBytes, "2097152"), - withPair(pathBackendBufferingMaxRequestBodyBytes, "10485760"), - withPair(pathBackendBufferingMemRequestBodyBytes, "2097152"), - withPair(pathBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"), - withPair("servers/server1/url", "http://172.17.0.2:80"), - withPair("servers/server1/weight", strconv.Itoa(label.DefaultWeight)), - withPair("servers/server2/weight", strconv.Itoa(label.DefaultWeight))), - frontend("frontend1", - withPair(pathFrontendBackend, "backend1"), - withPair(pathFrontendPriority, "6"), - withPair(pathFrontendPassHostHeader, "false"), - - withPair(pathFrontendPassTLSClientCertPem, "true"), - withPair(pathFrontendPassTLSClientCertInfosNotBefore, "true"), - withPair(pathFrontendPassTLSClientCertInfosNotAfter, "true"), - withPair(pathFrontendPassTLSClientCertInfosSans, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerCommonName, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerCountry, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerDomainComponent, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerLocality, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerOrganization, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerProvince, "true"), - withPair(pathFrontendPassTLSClientCertInfosIssuerSerialNumber, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectCommonName, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectCountry, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectDomainComponent, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectLocality, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectOrganization, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectProvince, "true"), - withPair(pathFrontendPassTLSClientCertInfosSubjectSerialNumber, "true"), - - withPair(pathFrontendPassTLSCert, "true"), - withList(pathFrontendEntryPoints, "http", 
"https"), - withList(pathFrontendWhiteListSourceRange, "1.1.1.1/24", "1234:abcd::42/32"), - withPair(pathFrontendWhiteListIPStrategyDepth, "5"), - withList(pathFrontendWhiteListIPStrategyExcludedIPs, "1.1.1.1/24", "1234:abcd::42/32"), - - withPair(pathFrontendAuthBasicRemoveHeader, "true"), - withList(pathFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withPair(pathFrontendAuthBasicUsersFile, ".htpasswd"), - withPair(pathFrontendAuthDigestRemoveHeader, "true"), - withList(pathFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withPair(pathFrontendAuthDigestUsersFile, ".htpasswd"), - withPair(pathFrontendAuthForwardAddress, "auth.server"), - withPair(pathFrontendAuthForwardTrustForwardHeader, "true"), - withPair(pathFrontendAuthForwardTLSCa, "ca.crt"), - withPair(pathFrontendAuthForwardTLSCaOptional, "true"), - withPair(pathFrontendAuthForwardTLSCert, "server.crt"), - withPair(pathFrontendAuthForwardTLSKey, "server.key"), - withPair(pathFrontendAuthForwardTLSInsecureSkipVerify, "true"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - - withPair(pathFrontendRedirectEntryPoint, "https"), - withPair(pathFrontendRedirectRegex, "nope"), - withPair(pathFrontendRedirectReplacement, "nope"), - withPair(pathFrontendRedirectPermanent, "true"), - withErrorPage("foo", "error", "/test1", "500-501", "503-599"), - withErrorPage("bar", "error", "/test2", "400-405"), - withRateLimit("client.ip", - withLimit("foo", "6", "12", "18"), - withLimit("bar", "3", "6", "9")), - - withPair(pathFrontendCustomRequestHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendCustomRequestHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendCustomRequestHeaders+"X-Custom-Header", "test"), - withPair(pathFrontendCustomResponseHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendCustomResponseHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendCustomResponseHeaders+"X-Custom-Header", "test"), - withPair(pathFrontendSSLProxyHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendSSLProxyHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendSSLProxyHeaders+"X-Custom-Header", "test"), - withPair(pathFrontendAllowedHosts, "example.com, ssl.example.com"), - withList(pathFrontendHostsProxyHeaders, "foo", "bar", "goo", "hor"), - withPair(pathFrontendSTSSeconds, "666"), - withPair(pathFrontendSSLHost, "foo"), - withPair(pathFrontendCustomFrameOptionsValue, "foo"), - withPair(pathFrontendContentSecurityPolicy, "foo"), - withPair(pathFrontendPublicKey, "foo"), - withPair(pathFrontendReferrerPolicy, "foo"), - withPair(pathFrontendCustomBrowserXSSValue, "foo"), - withPair(pathFrontendSSLForceHost, "true"), - withPair(pathFrontendSSLRedirect, "true"), - withPair(pathFrontendSSLTemporaryRedirect, "true"), - withPair(pathFrontendSTSIncludeSubdomains, "true"), - withPair(pathFrontendSTSPreload, "true"), - withPair(pathFrontendForceSTSHeader, "true"), - withPair(pathFrontendFrameDeny, "true"), - withPair(pathFrontendContentTypeNosniff, "true"), - withPair(pathFrontendBrowserXSSFilter, "true"), - withPair(pathFrontendIsDevelopment, "true"), - - withPair("routes/route1/rule", "Host:test.localhost"), - withPair("routes/route2/rule", "Path:/foo")), - entry("tls/foo", - withList("entrypoints", "http", "https"), - 
withPair("certificate/certfile", "certfile1"), - withPair("certificate/keyfile", "keyfile1")), - entry("tls/bar", - withList("entrypoints", "http", "https"), - withPair("certificate/certfile", "certfile2"), - withPair("certificate/keyfile", "keyfile2")), - ), - expected: &types.Configuration{ - Backends: map[string]*types.Backend{ - "backend1": { - Servers: map[string]types.Server{ - "server1": { - URL: "http://172.17.0.2:80", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 1", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "tomate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 5, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 80, - Interval: "30s", - Timeout: "5s", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - Frontends: map[string]*types.Frontend{ - "frontend1": { - Priority: 6, - EntryPoints: []string{"http", "https"}, - Backend: "backend1", - PassTLSCert: true, - WhiteList: &types.WhiteList{ - SourceRange: []string{"1.1.1.1/24", "1234:abcd::42/32"}, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"1.1.1.1/24", "1234:abcd::42/32"}, - }, - }, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Permanent: true, - }, - Errors: map[string]*types.ErrorPage{ - "foo": { - Backend: "error", - Query: "/test1", - Status: []string{"500-501", "503-599"}, - }, - "bar": { - Backend: "error", - Query: "/test2", - Status: []string{"400-405"}, - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Average: 6, - Burst: 12, - Period: parse.Duration(18 * time.Second), - }, - "bar": { - Average: 3, - Burst: 6, - Period: parse.Duration(9 * time.Second), - }, - }, - }, - Routes: map[string]types.Route{ - "route1": { - Rule: "Host:test.localhost", - }, - "route2": { - Rule: "Path:/foo", - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": 
"POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - AllowedHosts: []string{"example.com", "ssl.example.com"}, - HostsProxyHeaders: []string{"foo", "bar", "goo", "hor"}, - STSSeconds: 666, - SSLHost: "foo", - CustomFrameOptionsValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - CustomBrowserXSSValue: "foo", - SSLForceHost: true, - SSLRedirect: true, - SSLTemporaryRedirect: true, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - ContentTypeNosniff: true, - BrowserXSSFilter: true, - IsDevelopment: true, - }, - }, - }, - TLS: []*tls.Configuration{ - { - Stores: []string{"http", "https"}, - Certificate: &tls.Certificate{ - CertFile: "certfile2", - KeyFile: "keyfile2", - }, - }, - { - Stores: []string{"http", "https"}, - Certificate: &tls.Certificate{ - CertFile: "certfile1", - KeyFile: "keyfile1", - }, - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - Prefix: "traefik", - kvClient: &Mock{ - KVPairs: test.kvPairs, - }, - } - - actual := p.buildConfiguration() - assert.NotNil(t, actual) - - assert.EqualValues(t, test.expected.Backends, actual.Backends) - assert.EqualValues(t, test.expected.Frontends, actual.Frontends) - assert.EqualValues(t, test.expected, actual) - }) - } -} - -func TestProviderList(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected []string - }{ - { - desc: "empty key parts and empty store", - keyParts: []string{}, - expected: []string{}, - }, - { - desc: "when non existing key and empty store", - keyParts: []string{"traefik"}, - expected: []string{}, - }, - { - desc: "when non existing key", - kvPairs: []*store.KVPair{ - aKVPair("foo", "bar"), - }, - keyParts: []string{"bar"}, - expected: []string{}, - }, - { - desc: "when one key", - kvPairs: []*store.KVPair{ - aKVPair("foo", "bar"), - }, - keyParts: []string{"foo"}, - expected: []string{"foo"}, - }, - { - desc: "when multiple sub keys and nested sub key", - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar"), - aKVPair("foo/baz/2", "bar"), - aKVPair("foo/baz/biz/1", "bar"), - }, - keyParts: []string{"foo", "/baz/"}, - expected: []string{"foo/baz/1", "foo/baz/2"}, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar1"), - aKVPair("foo/baz/2", "bar2"), - aKVPair("foo/baz/biz/1", "bar3"), - }, - keyParts: []string{"foo/baz/1"}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - actual := p.list(test.keyParts...) 
- - sort.Strings(test.expected) - assert.Equal(t, test.expected, actual, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGet(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - storeType store.Backend - keyParts []string - defaultValue string - kvError error - expected string - }{ - { - desc: "when empty key parts, empty store", - defaultValue: "circle", - keyParts: []string{}, - expected: "circle", - }, - { - desc: "when non existing key", - defaultValue: "circle", - kvPairs: []*store.KVPair{ - aKVPair("foo", "bar"), - }, - keyParts: []string{"bar"}, - expected: "circle", - }, - { - desc: "when one part key", - kvPairs: []*store.KVPair{ - aKVPair("foo", "bar"), - }, - keyParts: []string{"foo"}, - expected: "bar", - }, - { - desc: "when several parts key", - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar1"), - aKVPair("foo/baz/2", "bar2"), - aKVPair("foo/baz/biz/1", "bar3"), - }, - keyParts: []string{"foo", "/baz/", "2"}, - expected: "bar2", - }, - { - desc: "when several parts key, starts with /", - defaultValue: "circle", - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar1"), - aKVPair("foo/baz/2", "bar2"), - aKVPair("foo/baz/biz/1", "bar3"), - }, - keyParts: []string{"/foo", "/baz/", "2"}, - expected: "circle", - }, - { - desc: "when several parts key starts with /, ETCD v2", - storeType: store.ETCD, - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar1"), - aKVPair("foo/baz/2", "bar2"), - aKVPair("foo/baz/biz/1", "bar3"), - }, - keyParts: []string{"/foo", "/baz/", "2"}, - expected: "bar2", - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: []*store.KVPair{ - aKVPair("foo/baz/1", "bar1"), - aKVPair("foo/baz/2", "bar2"), - aKVPair("foo/baz/biz/1", "bar3"), - }, - keyParts: []string{"foo/baz/1"}, - expected: "", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - storeType: test.storeType, - } - - actual := p.get(test.defaultValue, test.keyParts...) - - assert.Equal(t, test.expected, actual, "key %v", test.keyParts) - }) - } -} - -func TestProviderLast(t *testing.T) { - t.Skip("old tests") - p := &Provider{} - - testCases := []struct { - key string - expected string - }{ - { - key: "", - expected: "", - }, - { - key: "foo", - expected: "foo", - }, - { - key: "foo/bar", - expected: "bar", - }, - { - key: "foo/bar/baz", - expected: "baz", - }, - // FIXME is this wanted ? 
- { - key: "foo/bar/", - expected: "", - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - actual := p.last(test.key) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderSplitGet(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected []string - }{ - { - desc: "when has value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "courgette, carotte, tomate, aubergine"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: []string{"courgette", "carotte", "tomate", "aubergine"}, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: nil, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: nil, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - values := p.splitGet(test.keyParts...) - - assert.Equal(t, test.expected, values, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetList(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected []string - }{ - { - desc: "comma separated", - kvPairs: filler("traefik", - frontend("foo", - withPair("entrypoints", "courgette, carotte, tomate, aubergine"), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: []string{"courgette", "carotte", "tomate", "aubergine"}, - }, - { - desc: "multiple entries", - kvPairs: filler("traefik", - frontend("foo", - withPair("entrypoints/0", "courgette"), - withPair("entrypoints/1", "carotte"), - withPair("entrypoints/2", "tomate"), - withPair("entrypoints/3", "aubergine"), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: []string{"courgette", "carotte", "tomate", "aubergine"}, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - values := p.getList(test.keyParts...) 
- - assert.Equal(t, test.expected, values, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetSlice(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected []string - }{ - { - desc: "multiple entries", - kvPairs: filler("traefik", - frontend("foo", - withList("entrypoints", "courgette", "carotte", "tomate", "aubergine"), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: []string{"courgette", "carotte", "tomate", "aubergine"}, - }, - { - desc: "comma separated", - kvPairs: filler("traefik", - frontend("foo", - withPair("entrypoints", "courgette, carotte, tomate, aubergine"), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/entrypoints"}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - values := p.getSlice(test.keyParts...) - - assert.Equal(t, test.expected, values, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetBool(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected bool - }{ - { - desc: "when value is 'true", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "true"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: true, - }, - { - desc: "when value is 'false", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "false"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: false, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: false, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: false, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "true"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: false, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - actual := p.getBool(false, test.keyParts...) 
- - assert.Equal(t, test.expected, actual, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetInt(t *testing.T) { - t.Skip("old tests") - defaultValue := 666 - - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected int - }{ - { - desc: "when has value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "6"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: 6, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "true"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - actual := p.getInt(defaultValue, test.keyParts...) - - assert.Equal(t, test.expected, actual, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetInt64(t *testing.T) { - t.Skip("old tests") - var defaultValue int64 = 666 - - testCases := []struct { - desc string - kvPairs []*store.KVPair - kvError error - keyParts []string - expected int64 - }{ - { - desc: "when has value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "6"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: 6, - }, - { - desc: "when empty value", - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", ""), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - { - desc: "when not existing key", - kvPairs: nil, - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - { - desc: "when KV error", - kvError: store.ErrNotReachable, - kvPairs: filler("traefik", - frontend("foo", - withPair("bar", "true"), - ), - ), - keyParts: []string{"traefik/frontends/foo/bar"}, - expected: defaultValue, - }, - } - - for i, test := range testCases { - test := test - t.Run(strconv.Itoa(i), func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: newKvClientMock(test.kvPairs, test.kvError), - } - - actual := p.getInt64(defaultValue, test.keyParts...) 
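// Note (illustrative aside, not part of the removed file): the fixtures in these tests
// are assembled with small composable helpers (filler, frontend, backend, withPair,
// withList, withErrorPage, ...) defined elsewhere in the deleted package. The rough
// idea is a functional builder that expands nested options into a flat []*store.KVPair.
// The sketch below is a hypothetical minimal version of that idea (pair, section, fill
// are invented names), not the actual deleted helpers.
package main

import (
	"fmt"

	"github.com/abronan/valkeyrie/store"
)

type kvOption func(prefix string) []*store.KVPair

// pair records a single key/value under the enclosing prefix.
func pair(key, value string) kvOption {
	return func(prefix string) []*store.KVPair {
		return []*store.KVPair{{Key: prefix + "/" + key, Value: []byte(value)}}
	}
}

// section groups options under a sub-prefix such as "frontends/foo".
func section(name string, opts ...kvOption) kvOption {
	return func(prefix string) []*store.KVPair {
		var pairs []*store.KVPair
		for _, opt := range opts {
			pairs = append(pairs, opt(prefix+"/"+name)...)
		}
		return pairs
	}
}

// fill expands all options under the root prefix into a flat KVPair slice.
func fill(root string, opts ...kvOption) []*store.KVPair {
	var pairs []*store.KVPair
	for _, opt := range opts {
		pairs = append(pairs, opt(root)...)
	}
	return pairs
}

func main() {
	pairs := fill("traefik",
		section("frontends/foo", pair("backend", "backend1"), pair("routes/r1/rule", "Path:/foo")),
		section("backends/backend1", pair("servers/s1/url", "http://127.0.0.1:80")),
	)
	for _, p := range pairs {
		fmt.Printf("%s = %s\n", p.Key, p.Value)
	}
}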
- - assert.Equal(t, test.expected, actual, "key: %v", test.keyParts) - }) - } -} - -func TestProviderGetMap(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - keyParts []string - kvPairs []*store.KVPair - expected map[string]string - }{ - { - desc: "when several keys", - keyParts: []string{"traefik/frontends/foo", pathFrontendCustomRequestHeaders}, - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendCustomRequestHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendCustomRequestHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendCustomRequestHeaders+"X-Custom-Header", "test"), - ), - ), - expected: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - }, - { - desc: "when no keys", - keyParts: []string{"traefik/frontends/foo", pathFrontendCustomRequestHeaders}, - kvPairs: filler("traefik", frontend("foo")), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getMap(test.keyParts...) - - assert.EqualValues(t, test.expected, result) - }) - } -} - -func TestProviderHasStickinessLabel(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - rootPath string - expected bool - }{ - { - desc: "without option", - expected: false, - }, - { - desc: "with cookie name without stickiness=true", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"), - ), - ), - expected: false, - }, - { - desc: "stickiness=true", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerStickiness, "true"), - ), - ), - expected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := &Provider{ - kvClient: &Mock{ - KVPairs: test.kvPairs, - }, - } - - actual := p.getLoadBalancer(test.rootPath).Stickiness != nil - - if actual != test.expected { - t.Fatalf("expected %v, got %v", test.expected, actual) - } - }) - } -} - -func TestWhiteList(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.WhiteList - }{ - { - desc: "should return nil when no white list labels", - rootPath: "traefik/frontends/foo", - expected: nil, - }, - { - desc: "should return a struct when only range", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendWhiteListSourceRange, "10.10.10.10"))), - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - actual := p.getWhiteList(test.rootPath) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetRedirect(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.Redirect - }{ - { - desc: "should use entry point when entry point key is valued in the store", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - 
withPair(pathFrontendRedirectEntryPoint, "https"))), - expected: &types.Redirect{ - EntryPoint: "https", - }, - }, - { - desc: "should use entry point when entry point key is valued in the store (permanent)", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRedirectEntryPoint, "https"), - withPair(pathFrontendRedirectPermanent, "true"))), - expected: &types.Redirect{ - EntryPoint: "https", - Permanent: true, - }, - }, - { - desc: "should use regex when regex keys are valued in the store", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRedirectRegex, "(.*)"), - withPair(pathFrontendRedirectReplacement, "$1"))), - expected: &types.Redirect{ - Regex: "(.*)", - Replacement: "$1", - }, - }, - { - desc: "should use regex when regex keys are valued in the store (permanent)", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRedirectRegex, "(.*)"), - withPair(pathFrontendRedirectReplacement, "$1"), - withPair(pathFrontendRedirectPermanent, "true"))), - expected: &types.Redirect{ - Regex: "(.*)", - Replacement: "$1", - Permanent: true, - }, - }, - { - desc: "should only use entry point when entry point and regex base are valued in the store", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRedirectEntryPoint, "https"), - withPair(pathFrontendRedirectRegex, "nope"), - withPair(pathFrontendRedirectReplacement, "nope"))), - expected: &types.Redirect{ - EntryPoint: "https", - }, - }, - { - desc: "should return when redirect keys are not valued in the store", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", frontend("foo")), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - actual := p.getRedirect(test.rootPath) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetErrorPages(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected map[string]*types.ErrorPage - }{ - { - desc: "2 errors pages", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withErrorPage("foo", "error", "/test1", "500-501", "503-599"), - withErrorPage("bar", "error", "/test2", "400-405"))), - expected: map[string]*types.ErrorPage{ - "foo": { - Backend: "error", - Query: "/test1", - Status: []string{"500-501", "503-599"}, - }, - "bar": { - Backend: "error", - Query: "/test2", - Status: []string{"400-405"}, - }, - }, - }, - { - desc: "return nil when no errors pages", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", frontend("foo")), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - actual := p.getErrorPages(test.rootPath) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetRateLimit(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.RateLimit - }{ - { - desc: "with several limits", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withRateLimit("client.ip", - withLimit("foo", "6", "12", "18"), - withLimit("bar", "3", "6", "9")))), - expected: 
&types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Average: 6, - Burst: 12, - Period: parse.Duration(18 * time.Second), - }, - "bar": { - Average: 3, - Burst: 6, - Period: parse.Duration(9 * time.Second), - }, - }, - }, - }, - { - desc: "return nil when no extractor func", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withRateLimit("", - withLimit("foo", "6", "12", "18"), - withLimit("bar", "3", "6", "9")))), - expected: nil, - }, - { - desc: "return nil when no rate limit keys", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", frontend("foo")), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - actual := p.getRateLimit(test.rootPath) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetHeaders(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.Headers - }{ - { - desc: "Custom Request Headers", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendCustomRequestHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendCustomRequestHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendCustomRequestHeaders+"X-Custom-Header", "test"))), - expected: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - }, - }, - { - desc: "Custom esponse Headers", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendCustomResponseHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendCustomResponseHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendCustomResponseHeaders+"X-Custom-Header", "test"))), - expected: &types.Headers{ - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - }, - }, - { - desc: "SSL Proxy Headers", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSSLProxyHeaders+"Access-Control-Allow-Methods", "POST,GET,OPTIONS"), - withPair(pathFrontendSSLProxyHeaders+"Content-Type", "application/json; charset=utf-8"), - withPair(pathFrontendSSLProxyHeaders+"X-Custom-Header", "test"))), - expected: &types.Headers{ - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - "X-Custom-Header": "test", - }, - }, - }, - { - desc: "Allowed Hosts", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendAllowedHosts, "foo, bar, goo, hor"))), - expected: &types.Headers{ - AllowedHosts: []string{"foo", "bar", "goo", "hor"}, - }, - }, - { - desc: "Hosts Proxy Headers", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendHostsProxyHeaders, "foo, bar, goo, hor"))), - expected: &types.Headers{ - HostsProxyHeaders: []string{"foo", "bar", "goo", "hor"}, - }, - }, - { - desc: "SSL Redirect", - rootPath: "traefik/frontends/foo", - kvPairs: 
filler("traefik", - frontend("foo", - withPair(pathFrontendSSLRedirect, "true"))), - expected: &types.Headers{ - SSLRedirect: true, - }, - }, - { - desc: "SSL Temporary Redirect", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSSLTemporaryRedirect, "true"))), - expected: &types.Headers{ - SSLTemporaryRedirect: true, - }, - }, - { - desc: "SSL Host", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSSLHost, "foo"))), - expected: &types.Headers{ - SSLHost: "foo", - }, - }, - { - desc: "STS Seconds", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSTSSeconds, "666"))), - expected: &types.Headers{ - STSSeconds: 666, - }, - }, - { - desc: "STS Include Subdomains", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSTSIncludeSubdomains, "true"))), - expected: &types.Headers{ - STSIncludeSubdomains: true, - }, - }, - { - desc: "STS Preload", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendSTSPreload, "true"))), - expected: &types.Headers{ - STSPreload: true, - }, - }, - { - desc: "Force STS Header", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendForceSTSHeader, "true"))), - expected: &types.Headers{ - ForceSTSHeader: true, - }, - }, - { - desc: "Frame Deny", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendFrameDeny, "true"))), - expected: &types.Headers{ - FrameDeny: true, - }, - }, - { - desc: "Custom Frame Options Value", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendCustomFrameOptionsValue, "foo"))), - expected: &types.Headers{ - CustomFrameOptionsValue: "foo", - }, - }, - { - desc: "Content Type Nosniff", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendContentTypeNosniff, "true"))), - expected: &types.Headers{ - ContentTypeNosniff: true, - }, - }, - { - desc: "Browser XSS Filter", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendBrowserXSSFilter, "true"))), - expected: &types.Headers{ - BrowserXSSFilter: true, - }, - }, - { - desc: "Custom Browser XSS Value", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendCustomBrowserXSSValue, "foo"))), - expected: &types.Headers{ - CustomBrowserXSSValue: "foo", - }, - }, - { - desc: "Content Security Policy", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendContentSecurityPolicy, "foo"))), - expected: &types.Headers{ - ContentSecurityPolicy: "foo", - }, - }, - { - desc: "Public Key", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendPublicKey, "foo"))), - expected: &types.Headers{ - PublicKey: "foo", - }, - }, - { - desc: "Referrer Policy", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendReferrerPolicy, "foo"))), - expected: &types.Headers{ - ReferrerPolicy: "foo", - }, - }, - { - desc: "Is Development", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendIsDevelopment, "true"))), - expected: 
&types.Headers{ - IsDevelopment: true, - }, - }, - { - desc: "should return nil when not significant configuration", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendIsDevelopment, "false"))), - expected: nil, - }, - { - desc: "should return nil when no headers configuration", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", frontend("foo")), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - headers := p.getHeaders(test.rootPath) - - assert.Equal(t, test.expected, headers) - }) - } -} - -func TestProviderGetLoadBalancer(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.LoadBalancer - }{ - { - desc: "when all keys", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerMethod, "drr"), - withPair(pathBackendLoadBalancerStickiness, "true"), - withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"))), - expected: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "aubergine", - }, - }, - }, - { - desc: "when no specific configuration", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", backend("foo")), - expected: &types.LoadBalancer{ - Method: "wrr", - }, - }, - { - desc: "when method is set", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerMethod, "drr"))), - expected: &types.LoadBalancer{ - Method: "drr", - }, - }, - { - desc: "when stickiness is set", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerStickiness, "true"))), - expected: &types.LoadBalancer{ - Method: "wrr", - Stickiness: &types.Stickiness{}, - }, - }, - { - desc: "when stickiness cookie name is set", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerStickiness, "true"), - withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"))), - expected: &types.LoadBalancer{ - Method: "wrr", - Stickiness: &types.Stickiness{ - CookieName: "aubergine", - }, - }, - }, - { - desc: "when stickiness cookie name is set but not stickiness", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendLoadBalancerStickinessCookieName, "aubergine"))), - expected: &types.LoadBalancer{ - Method: "wrr", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getLoadBalancer(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetCircuitBreaker(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.CircuitBreaker - }{ - { - desc: "when cb expression defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendCircuitBreakerExpression, label.DefaultCircuitBreakerExpression))), - expected: &types.CircuitBreaker{ - Expression: label.DefaultCircuitBreakerExpression, - }, - }, - { - desc: "when no cb expression", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", backend("foo")), - expected: nil, - 
}, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getCircuitBreaker(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetMaxConn(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.MaxConn - }{ - { - desc: "when max conn keys are defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendMaxConnAmount, "5"), - withPair(pathBackendMaxConnExtractorFunc, "client.ip"))), - expected: &types.MaxConn{ - Amount: 5, - ExtractorFunc: "client.ip", - }, - }, - { - desc: "should return nil when only extractor func is defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendMaxConnExtractorFunc, "client.ip"))), - expected: nil, - }, - { - desc: "when only amount is defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendMaxConnAmount, "5"))), - expected: &types.MaxConn{ - Amount: 5, - ExtractorFunc: "request.host", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getMaxConn(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetHealthCheck(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.HealthCheck - }{ - { - desc: "when all configuration keys defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendHealthCheckPath, "/health"), - withPair(pathBackendHealthCheckPort, "80"), - withPair(pathBackendHealthCheckInterval, "10s"), - withPair(pathBackendHealthCheckTimeout, "3s"))), - - expected: &types.HealthCheck{ - Interval: "10s", - Timeout: "3s", - Path: "/health", - Port: 80, - }, - }, - { - desc: "when only path defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendHealthCheckPath, "/health"))), - expected: &types.HealthCheck{ - Interval: "30s", - Timeout: "5s", - Path: "/health", - Port: 0, - }, - }, - { - desc: "should return nil when no path", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendHealthCheckPort, "80"), - withPair(pathBackendHealthCheckInterval, "30s"), - withPair(pathBackendHealthCheckTimeout, "5s"))), - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getHealthCheck(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetBufferingReal(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.Buffering - }{ - { - desc: "when all configuration keys defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendBufferingMaxResponseBodyBytes, "10485760"), - withPair(pathBackendBufferingMemResponseBodyBytes, "2097152"), - withPair(pathBackendBufferingMaxRequestBodyBytes, "10485760"), - withPair(pathBackendBufferingMemRequestBodyBytes, "2097152"), - 
withPair(pathBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"))), - expected: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getBuffering(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetTLSes(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - kvPairs []*store.KVPair - expected []*tls.Configuration - }{ - { - desc: "when several TLS configuration defined", - kvPairs: filler("traefik", - entry("tls/foo", - withPair("entrypoints", "http,https"), - withPair("certificate/certfile", "certfile1"), - withPair("certificate/keyfile", "keyfile1")), - entry("tls/bar", - withPair("entrypoints", "http,https"), - withPair("certificate/certfile", "certfile2"), - withPair("certificate/keyfile", "keyfile2"))), - expected: []*tls.Configuration{ - { - Stores: []string{"http", "https"}, - Certificate: &tls.Certificate{ - CertFile: "certfile2", - KeyFile: "keyfile2", - }, - }, - { - Stores: []string{"http", "https"}, - Certificate: &tls.Certificate{ - CertFile: "certfile1", - KeyFile: "keyfile1", - }, - }, - }, - }, - { - desc: "should return nil when no TLS configuration", - kvPairs: filler("traefik", entry("tls/foo")), - expected: nil, - }, - { - desc: "should return nil when no entry points", - kvPairs: filler("traefik", - entry("tls/foo", - withPair("certificate/certfile", "certfile2"), - withPair("certificate/keyfile", "keyfile2"))), - expected: nil, - }, - { - desc: "should return nil when no cert file and no key file", - kvPairs: filler("traefik", - entry("tls/foo", - withPair("entrypoints", "http,https"))), - expected: nil, - }, - } - prefix := "traefik" - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getTLSSection(prefix) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetAuth(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected *types.Auth - }{ - { - desc: "should return nil when no data", - expected: nil, - }, - { - desc: "should return a valid basic auth", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendAuthBasicRemoveHeader, "true"), - withList(pathFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withPair(pathFrontendAuthBasicUsersFile, ".htpasswd"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"))), - expected: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - { - desc: "should return a valid digest auth", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withList(pathFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withPair(pathFrontendAuthDigestUsersFile, ".htpasswd"), - withPair(pathFrontendAuthHeaderField, 
"X-WebAuth-User"), - )), - expected: &types.Auth{ - HeaderField: "X-WebAuth-User", - Digest: &types.Digest{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - { - desc: "should return a valid forward auth", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendAuthForwardAddress, "auth.server"), - withPair(pathFrontendAuthForwardTrustForwardHeader, "true"), - withPair(pathFrontendAuthForwardTLSCa, "ca.crt"), - withPair(pathFrontendAuthForwardTLSCaOptional, "true"), - withPair(pathFrontendAuthForwardTLSCert, "server.crt"), - withPair(pathFrontendAuthForwardTLSKey, "server.key"), - withPair(pathFrontendAuthForwardTLSInsecureSkipVerify, "true"), - withPair(pathFrontendAuthHeaderField, "X-WebAuth-User"), - )), - expected: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TrustForwardHeader: true, - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getAuth(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetRoutes(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected map[string]types.Route - }{ - { - desc: "should return nil when no data", - expected: nil, - }, - { - desc: "should return nil when route key exists but without rule key", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRoutes+"bar", "test1"), - withPair(pathFrontendRoutes+"bir", "test2"))), - expected: nil, - }, - { - desc: "should return a map when configuration keys are defined", - rootPath: "traefik/frontends/foo", - kvPairs: filler("traefik", - frontend("foo", - withPair(pathFrontendRoutes+"bar"+pathFrontendRule, "test1"), - withPair(pathFrontendRoutes+"bir"+pathFrontendRule, "test2"))), - expected: map[string]types.Route{ - "bar": { - Rule: "test1", - }, - "bir": { - Rule: "test2", - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getRoutes(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestProviderGetServers(t *testing.T) { - t.Skip("old tests") - testCases := []struct { - desc string - rootPath string - kvPairs []*store.KVPair - expected map[string]types.Server - }{ - { - desc: "should return nil when no data", - expected: nil, - }, - { - desc: "should return nil when server has no URL", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendServers+"server1/weight", "7"), - withPair(pathBackendServers+"server2/weight", "6"))), - expected: nil, - }, - { - desc: "should use default weight when invalid weight value", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendServers+"server1/url", "http://172.17.0.2:80"), - withPair(pathBackendServers+"server1/weight", "kls"))), - expected: map[string]types.Server{ - "server1": { - URL: "http://172.17.0.2:80", - Weight: label.DefaultWeight, - }, - }, - }, - { - desc: "should return 
a map when configuration keys are defined", - rootPath: "traefik/backends/foo", - kvPairs: filler("traefik", - backend("foo", - withPair(pathBackendServers+"server1/url", "http://172.17.0.2:80"), - withPair(pathBackendServers+"server2/url", "http://172.17.0.3:80"), - withPair(pathBackendServers+"server2/weight", "6"))), - expected: map[string]types.Server{ - "server1": { - URL: "http://172.17.0.2:80", - Weight: label.DefaultWeight, - }, - "server2": { - URL: "http://172.17.0.3:80", - Weight: 6, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - p := newProviderMock(test.kvPairs) - - result := p.getServers(test.rootPath) - - assert.Equal(t, test.expected, result) - }) - } -} diff --git a/old/provider/kv/kv_mock_test.go b/old/provider/kv/kv_mock_test.go deleted file mode 100644 index a1916b7d0..000000000 --- a/old/provider/kv/kv_mock_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package kv - -import ( - "errors" - "strings" - - "github.com/abronan/valkeyrie/store" -) - -func newProviderMock(kvPairs []*store.KVPair) *Provider { - return &Provider{ - Prefix: "traefik", - kvClient: &Mock{ - KVPairs: kvPairs, - }, - } -} - -// Override Get/List to return a error -type KvError struct { - Get error - List error -} - -// Extremely limited mock store so we can test initialization -type Mock struct { - Error KvError - KVPairs []*store.KVPair - WatchTreeMethod func() <-chan []*store.KVPair -} - -func newKvClientMock(kvPairs []*store.KVPair, err error) *Mock { - mock := &Mock{ - KVPairs: kvPairs, - } - - if err != nil { - mock.Error = KvError{ - Get: err, - List: err, - } - } - return mock -} - -func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { - return errors.New("put not supported") -} - -func (s *Mock) Get(key string, options *store.ReadOptions) (*store.KVPair, error) { - if err := s.Error.Get; err != nil { - return nil, err - } - for _, kvPair := range s.KVPairs { - if kvPair.Key == key { - return kvPair, nil - } - } - return nil, store.ErrKeyNotFound -} - -func (s *Mock) Delete(key string) error { - return errors.New("delete not supported") -} - -// Exists mock -func (s *Mock) Exists(key string, options *store.ReadOptions) (bool, error) { - if err := s.Error.Get; err != nil { - return false, err - } - for _, kvPair := range s.KVPairs { - if strings.HasPrefix(kvPair.Key, key) { - return true, nil - } - } - return false, store.ErrKeyNotFound -} - -// Watch mock -func (s *Mock) Watch(key string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan *store.KVPair, error) { - return nil, errors.New("watch not supported") -} - -// WatchTree mock -func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}, options *store.ReadOptions) (<-chan []*store.KVPair, error) { - return s.WatchTreeMethod(), nil -} - -// NewLock mock -func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { - return nil, errors.New("NewLock not supported") -} - -// List mock -func (s *Mock) List(prefix string, options *store.ReadOptions) ([]*store.KVPair, error) { - if err := s.Error.List; err != nil { - return nil, err - } - var kv []*store.KVPair - for _, kvPair := range s.KVPairs { - if strings.HasPrefix(kvPair.Key, prefix) && !strings.ContainsAny(strings.TrimPrefix(kvPair.Key, prefix), pathSeparator) { - kv = append(kv, kvPair) - } - } - return kv, nil -} - -// DeleteTree mock -func (s *Mock) DeleteTree(prefix string) error { - return errors.New("DeleteTree not supported") -} - -// 
AtomicPut mock -func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { - return false, nil, errors.New("AtomicPut not supported") -} - -// AtomicDelete mock -func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { - return false, errors.New("AtomicDelete not supported") -} - -// Close mock -func (s *Mock) Close() {} diff --git a/old/provider/kv/kv_test.go b/old/provider/kv/kv_test.go deleted file mode 100644 index 1a9a982eb..000000000 --- a/old/provider/kv/kv_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package kv - -import ( - "testing" - "time" - - "github.com/abronan/valkeyrie/store" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" -) - -func TestKvWatchTree(t *testing.T) { - t.Skip("Old") - returnedChans := make(chan chan []*store.KVPair) - provider := Provider{ - kvClient: &Mock{ - WatchTreeMethod: func() <-chan []*store.KVPair { - c := make(chan []*store.KVPair, 10) - returnedChans <- c - return c - }, - }, - } - - configChan := make(chan types.ConfigMessage) - go func() { - if err := provider.watchKv(configChan, "prefix", make(chan bool, 1)); err != nil { - log.Error(err) - } - }() - - select { - case c1 := <-returnedChans: - c1 <- []*store.KVPair{} - <-configChan - close(c1) // WatchTree chans can close due to error - case <-time.After(1 * time.Second): - t.Fatalf("Failed to create a new WatchTree chan") - } - - select { - case c2 := <-returnedChans: - c2 <- []*store.KVPair{} - <-configChan - case <-time.After(1 * time.Second): - t.Fatalf("Failed to create a new WatchTree chan") - } - - select { - case <-configChan: - t.Fatalf("configChan should be empty") - default: - } -} diff --git a/old/provider/label/label.go b/old/provider/label/label.go deleted file mode 100644 index 1a69d76ba..000000000 --- a/old/provider/label/label.go +++ /dev/null @@ -1,212 +0,0 @@ -package label - -import ( - "fmt" - "net/http" - "regexp" - "strconv" - "strings" - - "github.com/containous/traefik/old/log" -) - -const ( - mapEntrySeparator = "||" - mapValueSeparator = ":" -) - -// Default values -const ( - DefaultWeight = 1 - DefaultProtocol = "http" - DefaultPassHostHeader = true - DefaultPassTLSCert = false - DefaultFrontendPriority = 0 - DefaultCircuitBreakerExpression = "NetworkErrorRatio() > 1" - DefaultBackendLoadBalancerMethod = "wrr" - DefaultBackendMaxconnExtractorFunc = "request.host" - DefaultBackendLoadbalancerStickinessCookieName = "" - DefaultBackendHealthCheckPort = 0 -) - -var ( - // RegexpFrontendErrorPage used to extract error pages from label - RegexpFrontendErrorPage = regexp.MustCompile(`^traefik\.frontend\.errors\.(?P[^ .]+)\.(?P[^ .]+)$`) - - // RegexpFrontendRateLimit used to extract rate limits from label - RegexpFrontendRateLimit = regexp.MustCompile(`^traefik\.frontend\.rateLimit\.rateSet\.(?P[^ .]+)\.(?P[^ .]+)$`) -) - -// GetStringValue get string value associated to a label -func GetStringValue(labels map[string]string, labelName string, defaultValue string) string { - if value, ok := labels[labelName]; ok && len(value) > 0 { - return value - } - return defaultValue -} - -// GetBoolValue get bool value associated to a label -func GetBoolValue(labels map[string]string, labelName string, defaultValue bool) bool { - rawValue, ok := labels[labelName] - if ok { - v, err := strconv.ParseBool(rawValue) - if err == nil { - return v - } - log.Errorf("Unable to parse %q: %q, falling back to %v. 
%v", labelName, rawValue, defaultValue, err) - } - return defaultValue -} - -// GetIntValue get int value associated to a label -func GetIntValue(labels map[string]string, labelName string, defaultValue int) int { - if rawValue, ok := labels[labelName]; ok { - value, err := strconv.Atoi(rawValue) - if err == nil { - return value - } - log.Errorf("Unable to parse %q: %q, falling back to %v. %v", labelName, rawValue, defaultValue, err) - } - return defaultValue -} - -// GetInt64Value get int64 value associated to a label -func GetInt64Value(labels map[string]string, labelName string, defaultValue int64) int64 { - if rawValue, ok := labels[labelName]; ok { - value, err := strconv.ParseInt(rawValue, 10, 64) - if err == nil { - return value - } - log.Errorf("Unable to parse %q: %q, falling back to %v. %v", labelName, rawValue, defaultValue, err) - } - return defaultValue -} - -// GetSliceStringValue get a slice of string associated to a label -func GetSliceStringValue(labels map[string]string, labelName string) []string { - var value []string - - if values, ok := labels[labelName]; ok { - value = SplitAndTrimString(values, ",") - - if len(value) == 0 { - log.Debugf("Could not load %q.", labelName) - } - } - return value -} - -// ParseMapValue get Map value for a label value -func ParseMapValue(labelName, values string) map[string]string { - mapValue := make(map[string]string) - - for _, parts := range strings.Split(values, mapEntrySeparator) { - pair := strings.SplitN(parts, mapValueSeparator, 2) - if len(pair) != 2 { - log.Warnf("Could not load %q: %q, skipping...", labelName, parts) - } else { - mapValue[http.CanonicalHeaderKey(strings.TrimSpace(pair[0]))] = strings.TrimSpace(pair[1]) - } - } - - if len(mapValue) == 0 { - log.Errorf("Could not load %q, skipping...", labelName) - return nil - } - return mapValue -} - -// GetMapValue get Map value associated to a label -func GetMapValue(labels map[string]string, labelName string) map[string]string { - if values, ok := labels[labelName]; ok { - - if len(values) == 0 { - log.Errorf("Missing value for %q, skipping...", labelName) - return nil - } - - return ParseMapValue(labelName, values) - } - - return nil -} - -// GetStringMultipleStrict get multiple string values associated to several labels -// Fail if one label is missing -func GetStringMultipleStrict(labels map[string]string, labelNames ...string) (map[string]string, error) { - foundLabels := map[string]string{} - for _, name := range labelNames { - value := GetStringValue(labels, name, "") - // Error out only if one of them is not defined. - if len(value) == 0 { - return nil, fmt.Errorf("label not found: %s", name) - } - foundLabels[name] = value - } - return foundLabels, nil -} - -// Has Check if a value is associated to a label -func Has(labels map[string]string, labelName string) bool { - value, ok := labels[labelName] - return ok && len(value) > 0 -} - -// HasPrefix Check if a value is associated to a less one label with a prefix -func HasPrefix(labels map[string]string, prefix string) bool { - for name, value := range labels { - if strings.HasPrefix(name, prefix) && len(value) > 0 { - return true - } - } - return false -} - -// IsEnabled Check if a container is enabled in Traefik -func IsEnabled(labels map[string]string, exposedByDefault bool) bool { - return GetBoolValue(labels, TraefikEnable, exposedByDefault) -} - -// SplitAndTrimString splits separatedString at the separator character and trims each -// piece, filtering out empty pieces. 
Returns the list of pieces or nil if the input -// did not contain a non-empty piece. -func SplitAndTrimString(base string, sep string) []string { - var trimmedStrings []string - - for _, s := range strings.Split(base, sep) { - s = strings.TrimSpace(s) - if len(s) > 0 { - trimmedStrings = append(trimmedStrings, s) - } - } - - return trimmedStrings -} - -// GetFuncString a func related to GetStringValue -func GetFuncString(labelName string, defaultValue string) func(map[string]string) string { - return func(labels map[string]string) string { - return GetStringValue(labels, labelName, defaultValue) - } -} - -// GetFuncInt a func related to GetIntValue -func GetFuncInt(labelName string, defaultValue int) func(map[string]string) int { - return func(labels map[string]string) int { - return GetIntValue(labels, labelName, defaultValue) - } -} - -// GetFuncBool a func related to GetBoolValue -func GetFuncBool(labelName string, defaultValue bool) func(map[string]string) bool { - return func(labels map[string]string) bool { - return GetBoolValue(labels, labelName, defaultValue) - } -} - -// GetFuncSliceString a func related to GetSliceStringValue -func GetFuncSliceString(labelName string) func(map[string]string) []string { - return func(labels map[string]string) []string { - return GetSliceStringValue(labels, labelName) - } -} diff --git a/old/provider/label/label_test.go b/old/provider/label/label_test.go deleted file mode 100644 index 13a4476d4..000000000 --- a/old/provider/label/label_test.go +++ /dev/null @@ -1,692 +0,0 @@ -package label - -import ( - "strconv" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSplitAndTrimString(t *testing.T) { - testCases := []struct { - desc string - input string - expected []string - }{ - { - desc: "empty string", - input: "", - expected: nil, - }, { - desc: "one piece", - input: "foo", - expected: []string{"foo"}, - }, { - desc: "two pieces", - input: "foo,bar", - expected: []string{"foo", "bar"}, - }, { - desc: "three pieces", - input: "foo,bar,zoo", - expected: []string{"foo", "bar", "zoo"}, - }, { - desc: "two pieces with whitespace", - input: " foo , bar ", - expected: []string{"foo", "bar"}, - }, { - desc: "consecutive commas", - input: " foo ,, bar ", - expected: []string{"foo", "bar"}, - }, { - desc: "consecutive commas with whitespace", - input: " foo , , bar ", - expected: []string{"foo", "bar"}, - }, { - desc: "leading and trailing commas", - input: ",, foo , , bar,, , ", - expected: []string{"foo", "bar"}, - }, { - desc: "no valid pieces", - input: ", , , ,, ,", - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - actual := SplitAndTrimString(test.input, ",") - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetStringValue(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - defaultValue string - expected string - }{ - { - desc: "empty labels map", - labelName: "foo", - defaultValue: "default", - expected: "default", - }, - { - desc: "existing label", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "foo", - defaultValue: "default", - expected: "bar", - }, - { - desc: "non existing label", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "fii", - defaultValue: "default", - expected: "default", - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetStringValue(test.labels, 
test.labelName, test.defaultValue) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestGetBoolValue(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - defaultValue bool - expected bool - }{ - { - desc: "empty map", - labelName: "foo", - }, - { - desc: "invalid boolean value", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "foo", - defaultValue: true, - expected: true, - }, - { - desc: "valid boolean value: true", - labels: map[string]string{ - "foo": "true", - }, - labelName: "foo", - defaultValue: false, - expected: true, - }, - { - desc: "valid boolean value: false", - labels: map[string]string{ - "foo": "false", - }, - labelName: "foo", - defaultValue: true, - expected: false, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetBoolValue(test.labels, test.labelName, test.defaultValue) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestGetIntValue(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - defaultValue int - expected int - }{ - { - desc: "empty map", - labelName: "foo", - }, - { - desc: "invalid int value", - labelName: "foo", - labels: map[string]string{ - "foo": "bar", - }, - defaultValue: 666, - expected: 666, - }, - { - desc: "negative int value", - labelName: "foo", - labels: map[string]string{ - "foo": "-1", - }, - defaultValue: 666, - expected: -1, - }, - { - desc: "positive int value", - labelName: "foo", - labels: map[string]string{ - "foo": "1", - }, - defaultValue: 666, - expected: 1, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetIntValue(test.labels, test.labelName, test.defaultValue) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestGetInt64Value(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - defaultValue int64 - expected int64 - }{ - { - desc: "empty map", - labelName: "foo", - }, - { - desc: "invalid int value", - labelName: "foo", - labels: map[string]string{ - "foo": "bar", - }, - defaultValue: 666, - expected: 666, - }, - { - desc: "negative int value", - labelName: "foo", - labels: map[string]string{ - "foo": "-1", - }, - defaultValue: 666, - expected: -1, - }, - { - desc: "positive int value", - labelName: "foo", - labels: map[string]string{ - "foo": "1", - }, - defaultValue: 666, - expected: 1, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetInt64Value(test.labels, test.labelName, test.defaultValue) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestGetSliceStringValue(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - expected []string - }{ - { - desc: "empty map", - labelName: "foo", - }, - { - desc: "empty value", - labels: map[string]string{ - "foo": "", - }, - labelName: "foo", - expected: nil, - }, - { - desc: "one value, not split", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "foo", - expected: []string{"bar"}, - }, - { - desc: "several values", - labels: map[string]string{ - "foo": "bar,bir ,bur", - }, - labelName: "foo", - expected: []string{"bar", "bir", "bur"}, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetSliceStringValue(test.labels, 
test.labelName) - assert.EqualValues(t, test.expected, got) - }) - } -} - -func TestGetMapValue(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - expected map[string]string - }{ - { - desc: "empty map", - labelName: "foo", - }, - { - desc: "existent label with empty entry", - labelName: "foo", - labels: map[string]string{ - "foo": "", - }, - expected: nil, - }, - { - desc: "existent label with invalid entry", - labelName: "foo", - labels: map[string]string{ - "foo": "bar", - }, - expected: nil, - }, - { - desc: "existent label with empty value", - labelName: "foo", - labels: map[string]string{ - "foo": "bar:", - }, - expected: map[string]string{ - "Bar": "", - }, - }, - { - desc: "one entry", - labelName: "foo", - labels: map[string]string{ - "foo": " Access-Control-Allow-Methods:POST,GET,OPTIONS ", - }, - expected: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - }, - }, - { - desc: "several entry", - labelName: "foo", - labels: map[string]string{ - "foo": "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - }, - expected: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := GetMapValue(test.labels, test.labelName) - assert.EqualValues(t, test.expected, got) - }) - } -} - -func TestGetStringMultipleStrict(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelNames []string - expected map[string]string - expectedErr bool - }{ - { - desc: "empty labels names and empty labels map", - labels: map[string]string{}, - expected: map[string]string{}, - }, - { - desc: "empty labels names", - labels: map[string]string{ - "foo": "bar", - "fii": "bir", - }, - expected: map[string]string{}, - }, - { - desc: "one label missing", - labels: map[string]string{ - "foo": "bar", - "fii": "bir", - "fyy": "byr", - }, - labelNames: []string{"foo", "fii", "fuu"}, - expected: nil, - expectedErr: true, - }, - { - desc: "all labels are present", - labels: map[string]string{ - "foo": "bar", - "fii": "bir", - "fyy": "byr", - }, - labelNames: []string{"foo", "fii"}, - expected: map[string]string{ - "foo": "bar", - "fii": "bir", - }, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got, err := GetStringMultipleStrict(test.labels, test.labelNames...) 
- if (err != nil) != test.expectedErr { - t.Errorf("error = %v, wantErr %v", err, test.expectedErr) - return - } - assert.EqualValues(t, test.expected, got) - }) - } -} - -func TestHas(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - expected bool - }{ - { - desc: "nil labels map", - labelName: "foo", - }, - { - desc: "nonexistent label", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "fii", - expected: false, - }, - { - desc: "existent label", - labels: map[string]string{ - "foo": "bar", - }, - labelName: "foo", - expected: true, - }, - { - desc: "existent label with empty value", - labels: map[string]string{ - "foo": "", - }, - labelName: "foo", - expected: false, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := Has(test.labels, test.labelName) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestIsEnabled(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - exposedByDefault bool - expected bool - }{ - { - desc: "empty labels map & exposedByDefault true", - exposedByDefault: true, - expected: true, - }, - { - desc: "empty labels map & exposedByDefault false", - exposedByDefault: false, - expected: false, - }, - { - desc: "exposedByDefault false and label enable true", - labels: map[string]string{ - TraefikEnable: "true", - }, - exposedByDefault: false, - expected: true, - }, - { - desc: "exposedByDefault false and label enable false", - labels: map[string]string{ - TraefikEnable: "false", - }, - exposedByDefault: false, - expected: false, - }, - { - desc: "exposedByDefault true and label enable false", - labels: map[string]string{ - TraefikEnable: "false", - }, - exposedByDefault: true, - expected: false, - }, - { - desc: "exposedByDefault true and label enable true", - labels: map[string]string{ - TraefikEnable: "true", - }, - exposedByDefault: true, - expected: true, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := IsEnabled(test.labels, test.exposedByDefault) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestHasPrefix(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - prefix string - expected bool - }{ - { - desc: "nil labels map", - prefix: "foo", - expected: false, - }, - { - desc: "nonexistent prefix", - labels: map[string]string{ - "foo.carotte": "bar", - }, - prefix: "fii", - expected: false, - }, - { - desc: "existent prefix", - labels: map[string]string{ - "foo.carotte": "bar", - }, - prefix: "foo", - expected: true, - }, - { - desc: "existent prefix with empty value", - labels: map[string]string{ - "foo.carotte": "", - }, - prefix: "foo", - expected: false, - }, - } - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - got := HasPrefix(test.labels, test.prefix) - assert.Equal(t, test.expected, got) - }) - } -} - -func TestGetFuncString(t *testing.T) { - testCases := []struct { - labels map[string]string - labelName string - defaultValue string - expected string - }{ - { - labels: nil, - labelName: TraefikProtocol, - defaultValue: DefaultProtocol, - expected: "http", - }, - { - labels: map[string]string{ - TraefikProtocol: "https", - }, - labelName: TraefikProtocol, - defaultValue: DefaultProtocol, - expected: "https", - }, - } - - for containerID, test := range testCases { - test := test - 
t.Run(test.labelName+strconv.Itoa(containerID), func(t *testing.T) { - t.Parallel() - - actual := GetFuncString(test.labelName, test.defaultValue)(test.labels) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetSliceString(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - labelName string - expected []string - }{ - { - desc: "no whitelist-label", - labels: nil, - expected: nil, - }, - { - desc: "whitelist-label with empty string", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "", - }, - labelName: TraefikFrontendWhiteListSourceRange, - expected: nil, - }, - { - desc: "whitelist-label with IPv4 mask", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "1.2.3.4/16", - }, - labelName: TraefikFrontendWhiteListSourceRange, - expected: []string{ - "1.2.3.4/16", - }, - }, - { - desc: "whitelist-label with IPv6 mask", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "fe80::/16", - }, - labelName: TraefikFrontendWhiteListSourceRange, - expected: []string{ - "fe80::/16", - }, - }, - { - desc: "whitelist-label with multiple masks", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "1.1.1.1/24, 1234:abcd::42/32", - }, - labelName: TraefikFrontendWhiteListSourceRange, - expected: []string{ - "1.1.1.1/24", - "1234:abcd::42/32", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetFuncSliceString(test.labelName)(test.labels) - assert.EqualValues(t, test.expected, actual) - }) - } -} diff --git a/old/provider/label/names.go b/old/provider/label/names.go deleted file mode 100644 index d77c69169..000000000 --- a/old/provider/label/names.go +++ /dev/null @@ -1,240 +0,0 @@ -package label - -// Traefik labels -const ( - Prefix = "traefik." 
- SuffixBackend = "backend" - SuffixDomain = "domain" - SuffixEnable = "enable" - SuffixPort = "port" - SuffixPortName = "portName" - SuffixPortIndex = "portIndex" - SuffixProtocol = "protocol" - SuffixTags = "tags" - SuffixWeight = "weight" - SuffixBackendID = "backend.id" - SuffixBackendCircuitBreaker = "backend.circuitbreaker" - SuffixBackendCircuitBreakerExpression = "backend.circuitbreaker.expression" - SuffixBackendHealthCheckScheme = "backend.healthcheck.scheme" - SuffixBackendHealthCheckPath = "backend.healthcheck.path" - SuffixBackendHealthCheckPort = "backend.healthcheck.port" - SuffixBackendHealthCheckInterval = "backend.healthcheck.interval" - SuffixBackendHealthCheckTimeout = "backend.healthcheck.timeout" - SuffixBackendHealthCheckHostname = "backend.healthcheck.hostname" - SuffixBackendHealthCheckHeaders = "backend.healthcheck.headers" - SuffixBackendLoadBalancer = "backend.loadbalancer" - SuffixBackendLoadBalancerMethod = SuffixBackendLoadBalancer + ".method" - SuffixBackendLoadBalancerStickiness = SuffixBackendLoadBalancer + ".stickiness" - SuffixBackendLoadBalancerStickinessCookieName = SuffixBackendLoadBalancer + ".stickiness.cookieName" - SuffixBackendMaxConnAmount = "backend.maxconn.amount" - SuffixBackendMaxConnExtractorFunc = "backend.maxconn.extractorfunc" - SuffixBackendBuffering = "backend.buffering" - SuffixBackendResponseForwardingFlushInterval = "backend.responseForwarding.flushInterval" - SuffixBackendBufferingMaxRequestBodyBytes = SuffixBackendBuffering + ".maxRequestBodyBytes" - SuffixBackendBufferingMemRequestBodyBytes = SuffixBackendBuffering + ".memRequestBodyBytes" - SuffixBackendBufferingMaxResponseBodyBytes = SuffixBackendBuffering + ".maxResponseBodyBytes" - SuffixBackendBufferingMemResponseBodyBytes = SuffixBackendBuffering + ".memResponseBodyBytes" - SuffixBackendBufferingRetryExpression = SuffixBackendBuffering + ".retryExpression" - SuffixFrontend = "frontend" - SuffixFrontendAuth = SuffixFrontend + ".auth" - SuffixFrontendAuthBasic = SuffixFrontendAuth + ".basic" - SuffixFrontendAuthBasicRealm = SuffixFrontendAuthBasic + ".realm" - SuffixFrontendAuthBasicRemoveHeader = SuffixFrontendAuthBasic + ".removeHeader" - SuffixFrontendAuthBasicUsers = SuffixFrontendAuthBasic + ".users" - SuffixFrontendAuthBasicUsersFile = SuffixFrontendAuthBasic + ".usersFile" - SuffixFrontendAuthDigest = SuffixFrontendAuth + ".digest" - SuffixFrontendAuthDigestRemoveHeader = SuffixFrontendAuthDigest + ".removeHeader" - SuffixFrontendAuthDigestUsers = SuffixFrontendAuthDigest + ".users" - SuffixFrontendAuthDigestUsersFile = SuffixFrontendAuthDigest + ".usersFile" - SuffixFrontendAuthForward = SuffixFrontendAuth + ".forward" - SuffixFrontendAuthForwardAddress = SuffixFrontendAuthForward + ".address" - SuffixFrontendAuthForwardAuthResponseHeaders = SuffixFrontendAuthForward + ".authResponseHeaders" - SuffixFrontendAuthForwardTLS = SuffixFrontendAuthForward + ".tls" - SuffixFrontendAuthForwardTLSCa = SuffixFrontendAuthForwardTLS + ".ca" - SuffixFrontendAuthForwardTLSCaOptional = SuffixFrontendAuthForwardTLS + ".caOptional" - SuffixFrontendAuthForwardTLSCert = SuffixFrontendAuthForwardTLS + ".cert" - SuffixFrontendAuthForwardTLSInsecureSkipVerify = SuffixFrontendAuthForwardTLS + ".insecureSkipVerify" - SuffixFrontendAuthForwardTLSKey = SuffixFrontendAuthForwardTLS + ".key" - SuffixFrontendAuthForwardTrustForwardHeader = SuffixFrontendAuthForward + ".trustForwardHeader" - SuffixFrontendAuthHeaderField = SuffixFrontendAuth + ".headerField" - SuffixFrontendEntryPoints = 
"frontend.entryPoints" - SuffixFrontendHeaders = "frontend.headers." - SuffixFrontendRequestHeaders = SuffixFrontendHeaders + "customRequestHeaders" - SuffixFrontendResponseHeaders = SuffixFrontendHeaders + "customResponseHeaders" - SuffixFrontendHeadersAllowedHosts = SuffixFrontendHeaders + "allowedHosts" - SuffixFrontendHeadersHostsProxyHeaders = SuffixFrontendHeaders + "hostsProxyHeaders" - SuffixFrontendHeadersSSLForceHost = SuffixFrontendHeaders + "SSLForceHost" - SuffixFrontendHeadersSSLRedirect = SuffixFrontendHeaders + "SSLRedirect" - SuffixFrontendHeadersSSLTemporaryRedirect = SuffixFrontendHeaders + "SSLTemporaryRedirect" - SuffixFrontendHeadersSSLHost = SuffixFrontendHeaders + "SSLHost" - SuffixFrontendHeadersSSLProxyHeaders = SuffixFrontendHeaders + "SSLProxyHeaders" - SuffixFrontendHeadersSTSSeconds = SuffixFrontendHeaders + "STSSeconds" - SuffixFrontendHeadersSTSIncludeSubdomains = SuffixFrontendHeaders + "STSIncludeSubdomains" - SuffixFrontendHeadersSTSPreload = SuffixFrontendHeaders + "STSPreload" - SuffixFrontendHeadersForceSTSHeader = SuffixFrontendHeaders + "forceSTSHeader" - SuffixFrontendHeadersFrameDeny = SuffixFrontendHeaders + "frameDeny" - SuffixFrontendHeadersCustomFrameOptionsValue = SuffixFrontendHeaders + "customFrameOptionsValue" - SuffixFrontendHeadersContentTypeNosniff = SuffixFrontendHeaders + "contentTypeNosniff" - SuffixFrontendHeadersBrowserXSSFilter = SuffixFrontendHeaders + "browserXSSFilter" - SuffixFrontendHeadersCustomBrowserXSSValue = SuffixFrontendHeaders + "customBrowserXSSValue" - SuffixFrontendHeadersContentSecurityPolicy = SuffixFrontendHeaders + "contentSecurityPolicy" - SuffixFrontendHeadersPublicKey = SuffixFrontendHeaders + "publicKey" - SuffixFrontendHeadersReferrerPolicy = SuffixFrontendHeaders + "referrerPolicy" - SuffixFrontendHeadersIsDevelopment = SuffixFrontendHeaders + "isDevelopment" - SuffixFrontendPassHostHeader = "frontend.passHostHeader" - SuffixFrontendPassTLSClientCert = "frontend.passTLSClientCert" - SuffixFrontendPassTLSClientCertPem = SuffixFrontendPassTLSClientCert + ".pem" - SuffixFrontendPassTLSClientCertInfos = SuffixFrontendPassTLSClientCert + ".infos" - SuffixFrontendPassTLSClientCertInfosIssuer = SuffixFrontendPassTLSClientCertInfos + ".issuer" - SuffixFrontendPassTLSClientCertInfosIssuerCommonName = SuffixFrontendPassTLSClientCertInfosIssuer + ".commonName" - SuffixFrontendPassTLSClientCertInfosIssuerCountry = SuffixFrontendPassTLSClientCertInfosIssuer + ".country" - SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent = SuffixFrontendPassTLSClientCertInfosIssuer + ".domainComponent" - SuffixFrontendPassTLSClientCertInfosIssuerLocality = SuffixFrontendPassTLSClientCertInfosIssuer + ".locality" - SuffixFrontendPassTLSClientCertInfosIssuerOrganization = SuffixFrontendPassTLSClientCertInfosIssuer + ".organization" - SuffixFrontendPassTLSClientCertInfosIssuerProvince = SuffixFrontendPassTLSClientCertInfosIssuer + ".province" - SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber = SuffixFrontendPassTLSClientCertInfosIssuer + ".serialNumber" - SuffixFrontendPassTLSClientCertInfosSubject = SuffixFrontendPassTLSClientCertInfos + ".subject" - SuffixFrontendPassTLSClientCertInfosNotAfter = SuffixFrontendPassTLSClientCertInfos + ".notAfter" - SuffixFrontendPassTLSClientCertInfosNotBefore = SuffixFrontendPassTLSClientCertInfos + ".notBefore" - SuffixFrontendPassTLSClientCertInfosSans = SuffixFrontendPassTLSClientCertInfos + ".sans" - SuffixFrontendPassTLSClientCertInfosSubjectCommonName = 
SuffixFrontendPassTLSClientCertInfosSubject + ".commonName" - SuffixFrontendPassTLSClientCertInfosSubjectCountry = SuffixFrontendPassTLSClientCertInfosSubject + ".country" - SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent = SuffixFrontendPassTLSClientCertInfosSubject + ".domainComponent" - SuffixFrontendPassTLSClientCertInfosSubjectLocality = SuffixFrontendPassTLSClientCertInfosSubject + ".locality" - SuffixFrontendPassTLSClientCertInfosSubjectOrganization = SuffixFrontendPassTLSClientCertInfosSubject + ".organization" - SuffixFrontendPassTLSClientCertInfosSubjectProvince = SuffixFrontendPassTLSClientCertInfosSubject + ".province" - SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber = SuffixFrontendPassTLSClientCertInfosSubject + ".serialNumber" - SuffixFrontendPassTLSCert = "frontend.passTLSCert" // Deprecated - SuffixFrontendPriority = "frontend.priority" - SuffixFrontendRateLimitExtractorFunc = "frontend.rateLimit.extractorFunc" - SuffixFrontendRedirectEntryPoint = "frontend.redirect.entryPoint" - SuffixFrontendRedirectRegex = "frontend.redirect.regex" - SuffixFrontendRedirectReplacement = "frontend.redirect.replacement" - SuffixFrontendRedirectPermanent = "frontend.redirect.permanent" - SuffixFrontendRule = "frontend.rule" - SuffixFrontendWhiteList = "frontend.whiteList." - SuffixFrontendWhiteListSourceRange = SuffixFrontendWhiteList + "sourceRange" - SuffixFrontendWhiteListIPStrategy = SuffixFrontendWhiteList + "ipStrategy" - SuffixFrontendWhiteListIPStrategyDepth = SuffixFrontendWhiteListIPStrategy + ".depth" - SuffixFrontendWhiteListIPStrategyExcludedIPS = SuffixFrontendWhiteListIPStrategy + ".excludedIPs" - TraefikDomain = Prefix + SuffixDomain - TraefikEnable = Prefix + SuffixEnable - TraefikPort = Prefix + SuffixPort - TraefikPortName = Prefix + SuffixPortName - TraefikPortIndex = Prefix + SuffixPortIndex - TraefikProtocol = Prefix + SuffixProtocol - TraefikTags = Prefix + SuffixTags - TraefikWeight = Prefix + SuffixWeight - TraefikBackend = Prefix + SuffixBackend - TraefikBackendID = Prefix + SuffixBackendID - TraefikBackendCircuitBreaker = Prefix + SuffixBackendCircuitBreaker - TraefikBackendCircuitBreakerExpression = Prefix + SuffixBackendCircuitBreakerExpression - TraefikBackendHealthCheckScheme = Prefix + SuffixBackendHealthCheckScheme - TraefikBackendHealthCheckPath = Prefix + SuffixBackendHealthCheckPath - TraefikBackendHealthCheckPort = Prefix + SuffixBackendHealthCheckPort - TraefikBackendHealthCheckInterval = Prefix + SuffixBackendHealthCheckInterval - TraefikBackendHealthCheckTimeout = Prefix + SuffixBackendHealthCheckTimeout - TraefikBackendHealthCheckHostname = Prefix + SuffixBackendHealthCheckHostname - TraefikBackendHealthCheckHeaders = Prefix + SuffixBackendHealthCheckHeaders - TraefikBackendLoadBalancer = Prefix + SuffixBackendLoadBalancer - TraefikBackendLoadBalancerMethod = Prefix + SuffixBackendLoadBalancerMethod - TraefikBackendLoadBalancerStickiness = Prefix + SuffixBackendLoadBalancerStickiness - TraefikBackendLoadBalancerStickinessCookieName = Prefix + SuffixBackendLoadBalancerStickinessCookieName - TraefikBackendMaxConnAmount = Prefix + SuffixBackendMaxConnAmount - TraefikBackendMaxConnExtractorFunc = Prefix + SuffixBackendMaxConnExtractorFunc - TraefikBackendBuffering = Prefix + SuffixBackendBuffering - TraefikBackendResponseForwardingFlushInterval = Prefix + SuffixBackendResponseForwardingFlushInterval - TraefikBackendBufferingMaxRequestBodyBytes = Prefix + SuffixBackendBufferingMaxRequestBodyBytes - 
TraefikBackendBufferingMemRequestBodyBytes = Prefix + SuffixBackendBufferingMemRequestBodyBytes - TraefikBackendBufferingMaxResponseBodyBytes = Prefix + SuffixBackendBufferingMaxResponseBodyBytes - TraefikBackendBufferingMemResponseBodyBytes = Prefix + SuffixBackendBufferingMemResponseBodyBytes - TraefikBackendBufferingRetryExpression = Prefix + SuffixBackendBufferingRetryExpression - TraefikFrontend = Prefix + SuffixFrontend - TraefikFrontendAuth = Prefix + SuffixFrontendAuth - TraefikFrontendAuthBasic = Prefix + SuffixFrontendAuthBasic - TraefikFrontendAuthBasicRealm = Prefix + SuffixFrontendAuthBasicRealm - TraefikFrontendAuthBasicRemoveHeader = Prefix + SuffixFrontendAuthBasicRemoveHeader - TraefikFrontendAuthBasicUsers = Prefix + SuffixFrontendAuthBasicUsers - TraefikFrontendAuthBasicUsersFile = Prefix + SuffixFrontendAuthBasicUsersFile - TraefikFrontendAuthDigest = Prefix + SuffixFrontendAuthDigest - TraefikFrontendAuthDigestRemoveHeader = Prefix + SuffixFrontendAuthDigestRemoveHeader - TraefikFrontendAuthDigestUsers = Prefix + SuffixFrontendAuthDigestUsers - TraefikFrontendAuthDigestUsersFile = Prefix + SuffixFrontendAuthDigestUsersFile - TraefikFrontendAuthForward = Prefix + SuffixFrontendAuthForward - TraefikFrontendAuthForwardAddress = Prefix + SuffixFrontendAuthForwardAddress - TraefikFrontendAuthForwardAuthResponseHeaders = Prefix + SuffixFrontendAuthForwardAuthResponseHeaders - TraefikFrontendAuthForwardTLS = Prefix + SuffixFrontendAuthForwardTLS - TraefikFrontendAuthForwardTLSCa = Prefix + SuffixFrontendAuthForwardTLSCa - TraefikFrontendAuthForwardTLSCaOptional = Prefix + SuffixFrontendAuthForwardTLSCaOptional - TraefikFrontendAuthForwardTLSCert = Prefix + SuffixFrontendAuthForwardTLSCert - TraefikFrontendAuthForwardTLSInsecureSkipVerify = Prefix + SuffixFrontendAuthForwardTLSInsecureSkipVerify - TraefikFrontendAuthForwardTLSKey = Prefix + SuffixFrontendAuthForwardTLSKey - TraefikFrontendAuthForwardTrustForwardHeader = Prefix + SuffixFrontendAuthForwardTrustForwardHeader - TraefikFrontendAuthHeaderField = Prefix + SuffixFrontendAuthHeaderField - TraefikFrontendEntryPoints = Prefix + SuffixFrontendEntryPoints - TraefikFrontendPassHostHeader = Prefix + SuffixFrontendPassHostHeader - TraefikFrontendPassTLSClientCert = Prefix + SuffixFrontendPassTLSClientCert - TraefikFrontendPassTLSClientCertPem = Prefix + SuffixFrontendPassTLSClientCertPem - TraefikFrontendPassTLSClientCertInfos = Prefix + SuffixFrontendPassTLSClientCertInfos - TraefikFrontendPassTLSClientCertInfosIssuer = Prefix + SuffixFrontendPassTLSClientCertInfosIssuer - TraefikFrontendPassTLSClientCertInfosIssuerCommonName = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerCommonName - TraefikFrontendPassTLSClientCertInfosIssuerCountry = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerCountry - TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent - TraefikFrontendPassTLSClientCertInfosIssuerLocality = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerLocality - TraefikFrontendPassTLSClientCertInfosIssuerOrganization = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerOrganization - TraefikFrontendPassTLSClientCertInfosIssuerProvince = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerProvince - TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber = Prefix + SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber - TraefikFrontendPassTLSClientCertInfosNotAfter = Prefix + SuffixFrontendPassTLSClientCertInfosNotAfter - 
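The Traefik* keys below are consumed through small typed lookup helpers (GetStringValue, GetBoolValue, GetInt64Value, and so on) that return a default when a label is absent. Their implementations are not part of this hunk, so the following is only a sketch of that pattern, under the assumption that unparsable values also fall back to the default.

package main

import (
    "fmt"
    "strconv"
)

// getBoolValue mirrors the GetBoolValue-style helpers used below: a missing or
// malformed label falls back to the provided default (an assumption here).
func getBoolValue(labels map[string]string, key string, def bool) bool {
    if raw, ok := labels[key]; ok {
        if v, err := strconv.ParseBool(raw); err == nil {
            return v
        }
    }
    return def
}

func getInt64Value(labels map[string]string, key string, def int64) int64 {
    if raw, ok := labels[key]; ok {
        if v, err := strconv.ParseInt(raw, 10, 64); err == nil {
            return v
        }
    }
    return def
}

func main() {
    labels := map[string]string{
        "traefik.frontend.headers.SSLRedirect": "true",
        "traefik.frontend.headers.STSSeconds":  "315360000",
    }
    fmt.Println(getBoolValue(labels, "traefik.frontend.headers.SSLRedirect", false)) // true
    fmt.Println(getInt64Value(labels, "traefik.frontend.headers.STSSeconds", 0))     // 315360000
    fmt.Println(getBoolValue(labels, "traefik.frontend.passHostHeader", true))       // absent: default true
}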
TraefikFrontendPassTLSClientCertInfosNotBefore = Prefix + SuffixFrontendPassTLSClientCertInfosNotBefore - TraefikFrontendPassTLSClientCertInfosSans = Prefix + SuffixFrontendPassTLSClientCertInfosSans - TraefikFrontendPassTLSClientCertInfosSubject = Prefix + SuffixFrontendPassTLSClientCertInfosSubject - TraefikFrontendPassTLSClientCertInfosSubjectCommonName = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectCommonName - TraefikFrontendPassTLSClientCertInfosSubjectCountry = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectCountry - TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent - TraefikFrontendPassTLSClientCertInfosSubjectLocality = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectLocality - TraefikFrontendPassTLSClientCertInfosSubjectOrganization = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectOrganization - TraefikFrontendPassTLSClientCertInfosSubjectProvince = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectProvince - TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber = Prefix + SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber - TraefikFrontendPassTLSCert = Prefix + SuffixFrontendPassTLSCert // Deprecated - TraefikFrontendPriority = Prefix + SuffixFrontendPriority - TraefikFrontendRateLimitExtractorFunc = Prefix + SuffixFrontendRateLimitExtractorFunc - TraefikFrontendRedirectEntryPoint = Prefix + SuffixFrontendRedirectEntryPoint - TraefikFrontendRedirectRegex = Prefix + SuffixFrontendRedirectRegex - TraefikFrontendRedirectReplacement = Prefix + SuffixFrontendRedirectReplacement - TraefikFrontendRedirectPermanent = Prefix + SuffixFrontendRedirectPermanent - TraefikFrontendRule = Prefix + SuffixFrontendRule - TraefikFrontendWhiteListSourceRange = Prefix + SuffixFrontendWhiteListSourceRange - TraefikFrontendWhiteListIPStrategy = Prefix + SuffixFrontendWhiteListIPStrategy - TraefikFrontendWhiteListIPStrategyDepth = Prefix + SuffixFrontendWhiteListIPStrategyDepth - TraefikFrontendWhiteListIPStrategyExcludedIPS = Prefix + SuffixFrontendWhiteListIPStrategyExcludedIPS - TraefikFrontendRequestHeaders = Prefix + SuffixFrontendRequestHeaders - TraefikFrontendResponseHeaders = Prefix + SuffixFrontendResponseHeaders - TraefikFrontendAllowedHosts = Prefix + SuffixFrontendHeadersAllowedHosts - TraefikFrontendHostsProxyHeaders = Prefix + SuffixFrontendHeadersHostsProxyHeaders - TraefikFrontendSSLForceHost = Prefix + SuffixFrontendHeadersSSLForceHost - TraefikFrontendSSLRedirect = Prefix + SuffixFrontendHeadersSSLRedirect - TraefikFrontendSSLTemporaryRedirect = Prefix + SuffixFrontendHeadersSSLTemporaryRedirect - TraefikFrontendSSLHost = Prefix + SuffixFrontendHeadersSSLHost - TraefikFrontendSSLProxyHeaders = Prefix + SuffixFrontendHeadersSSLProxyHeaders - TraefikFrontendSTSSeconds = Prefix + SuffixFrontendHeadersSTSSeconds - TraefikFrontendSTSIncludeSubdomains = Prefix + SuffixFrontendHeadersSTSIncludeSubdomains - TraefikFrontendSTSPreload = Prefix + SuffixFrontendHeadersSTSPreload - TraefikFrontendForceSTSHeader = Prefix + SuffixFrontendHeadersForceSTSHeader - TraefikFrontendFrameDeny = Prefix + SuffixFrontendHeadersFrameDeny - TraefikFrontendCustomFrameOptionsValue = Prefix + SuffixFrontendHeadersCustomFrameOptionsValue - TraefikFrontendContentTypeNosniff = Prefix + SuffixFrontendHeadersContentTypeNosniff - TraefikFrontendBrowserXSSFilter = Prefix + SuffixFrontendHeadersBrowserXSSFilter - TraefikFrontendCustomBrowserXSSValue = Prefix + SuffixFrontendHeadersCustomBrowserXSSValue - 
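Several of the builders further below (GetTLSClientCert, GetAuth, GetBuffering, GetLoadBalancer) only materialize a nested option struct when at least one label under its dotted prefix is present. A self-contained sketch of that gating check follows, with hasPrefix standing in for the package's HasPrefix helper (illustrative, not the real API).

package main

import (
    "fmt"
    "strings"
)

// hasPrefix reports whether any label key starts with the given dotted prefix.
func hasPrefix(labels map[string]string, prefix string) bool {
    for key := range labels {
        if strings.HasPrefix(key, prefix) {
            return true
        }
    }
    return false
}

func main() {
    labels := map[string]string{
        "traefik.frontend.passTLSClientCert.infos.subject.commonName": "true",
    }
    // Build the nested "infos.subject" block only if something under it is set.
    fmt.Println(hasPrefix(labels, "traefik.frontend.passTLSClientCert"))              // true
    fmt.Println(hasPrefix(labels, "traefik.frontend.passTLSClientCert.infos.issuer")) // false
}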
TraefikFrontendContentSecurityPolicy = Prefix + SuffixFrontendHeadersContentSecurityPolicy - TraefikFrontendPublicKey = Prefix + SuffixFrontendHeadersPublicKey - TraefikFrontendReferrerPolicy = Prefix + SuffixFrontendHeadersReferrerPolicy - TraefikFrontendIsDevelopment = Prefix + SuffixFrontendHeadersIsDevelopment - BaseFrontendErrorPage = "frontend.errors." - SuffixErrorPageBackend = "backend" - SuffixErrorPageQuery = "query" - SuffixErrorPageStatus = "status" - BaseFrontendRateLimit = "frontend.rateLimit.rateSet." - SuffixRateLimitPeriod = "period" - SuffixRateLimitAverage = "average" - SuffixRateLimitBurst = "burst" -) diff --git a/old/provider/label/partial.go b/old/provider/label/partial.go deleted file mode 100644 index e09ff31e6..000000000 --- a/old/provider/label/partial.go +++ /dev/null @@ -1,431 +0,0 @@ -package label - -import ( - "math" - "regexp" - "strconv" - "strings" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" -) - -// GetWhiteList Create white list from labels -func GetWhiteList(labels map[string]string) *types.WhiteList { - ranges := GetSliceStringValue(labels, TraefikFrontendWhiteListSourceRange) - if len(ranges) == 0 { - return nil - } - - return &types.WhiteList{ - SourceRange: ranges, - IPStrategy: getIPStrategy(labels), - } -} - -func getIPStrategy(labels map[string]string) *types.IPStrategy { - ipStrategy := GetBoolValue(labels, TraefikFrontendWhiteListIPStrategy, false) - depth := GetIntValue(labels, TraefikFrontendWhiteListIPStrategyDepth, 0) - excludedIPs := GetSliceStringValue(labels, TraefikFrontendWhiteListIPStrategyExcludedIPS) - - if depth == 0 && len(excludedIPs) == 0 && !ipStrategy { - return nil - } - - return &types.IPStrategy{ - Depth: depth, - ExcludedIPs: excludedIPs, - } -} - -// GetRedirect Create redirect from labels -func GetRedirect(labels map[string]string) *types.Redirect { - permanent := GetBoolValue(labels, TraefikFrontendRedirectPermanent, false) - - if Has(labels, TraefikFrontendRedirectEntryPoint) { - return &types.Redirect{ - EntryPoint: GetStringValue(labels, TraefikFrontendRedirectEntryPoint, ""), - Permanent: permanent, - } - } - - if Has(labels, TraefikFrontendRedirectRegex) && - Has(labels, TraefikFrontendRedirectReplacement) { - return &types.Redirect{ - Regex: GetStringValue(labels, TraefikFrontendRedirectRegex, ""), - Replacement: GetStringValue(labels, TraefikFrontendRedirectReplacement, ""), - Permanent: permanent, - } - } - - return nil -} - -// GetTLSClientCert create TLS client header configuration from labels -func GetTLSClientCert(labels map[string]string) *types.TLSClientHeaders { - if !HasPrefix(labels, TraefikFrontendPassTLSClientCert) { - return nil - } - - tlsClientHeaders := &types.TLSClientHeaders{ - PEM: GetBoolValue(labels, TraefikFrontendPassTLSClientCertPem, false), - } - - if HasPrefix(labels, TraefikFrontendPassTLSClientCertInfos) { - infos := &types.TLSClientCertificateInfos{ - NotAfter: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosNotAfter, false), - NotBefore: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosNotBefore, false), - Sans: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSans, false), - } - - if HasPrefix(labels, TraefikFrontendPassTLSClientCertInfosSubject) { - subject := &types.TLSCLientCertificateDNInfos{ - CommonName: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectCommonName, false), - Country: GetBoolValue(labels, 
TraefikFrontendPassTLSClientCertInfosSubjectCountry, false), - DomainComponent: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent, false), - Locality: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectLocality, false), - Organization: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectOrganization, false), - Province: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectProvince, false), - SerialNumber: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, false), - } - infos.Subject = subject - } - - if HasPrefix(labels, TraefikFrontendPassTLSClientCertInfosIssuer) { - issuer := &types.TLSCLientCertificateDNInfos{ - CommonName: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerCommonName, false), - Country: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerCountry, false), - DomainComponent: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent, false), - Locality: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerLocality, false), - Organization: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerOrganization, false), - Province: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerProvince, false), - SerialNumber: GetBoolValue(labels, TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber, false), - } - infos.Issuer = issuer - } - tlsClientHeaders.Infos = infos - } - return tlsClientHeaders -} - -// GetAuth Create auth from labels -func GetAuth(labels map[string]string) *types.Auth { - if !HasPrefix(labels, TraefikFrontendAuth) { - return nil - } - - auth := &types.Auth{ - HeaderField: GetStringValue(labels, TraefikFrontendAuthHeaderField, ""), - } - - if HasPrefix(labels, TraefikFrontendAuthBasic) { - auth.Basic = getAuthBasic(labels) - } else if HasPrefix(labels, TraefikFrontendAuthDigest) { - auth.Digest = getAuthDigest(labels) - } else if HasPrefix(labels, TraefikFrontendAuthForward) { - auth.Forward = getAuthForward(labels) - } - - return auth -} - -// getAuthBasic Create Basic Auth from labels -func getAuthBasic(labels map[string]string) *types.Basic { - basicAuth := &types.Basic{ - Realm: GetStringValue(labels, TraefikFrontendAuthBasicRealm, ""), - UsersFile: GetStringValue(labels, TraefikFrontendAuthBasicUsersFile, ""), - RemoveHeader: GetBoolValue(labels, TraefikFrontendAuthBasicRemoveHeader, false), - } - - // backward compatibility - if Has(labels, TraefikFrontendAuthBasic) { - basicAuth.Users = GetSliceStringValue(labels, TraefikFrontendAuthBasic) - log.Warnf("Deprecated configuration found: %s. 
Please use %s.", TraefikFrontendAuthBasic, TraefikFrontendAuthBasicUsers) - } else { - basicAuth.Users = GetSliceStringValue(labels, TraefikFrontendAuthBasicUsers) - } - - return basicAuth -} - -// getAuthDigest Create Digest Auth from labels -func getAuthDigest(labels map[string]string) *types.Digest { - return &types.Digest{ - Users: GetSliceStringValue(labels, TraefikFrontendAuthDigestUsers), - UsersFile: GetStringValue(labels, TraefikFrontendAuthDigestUsersFile, ""), - RemoveHeader: GetBoolValue(labels, TraefikFrontendAuthDigestRemoveHeader, false), - } -} - -// getAuthForward Create Forward Auth from labels -func getAuthForward(labels map[string]string) *types.Forward { - forwardAuth := &types.Forward{ - Address: GetStringValue(labels, TraefikFrontendAuthForwardAddress, ""), - AuthResponseHeaders: GetSliceStringValue(labels, TraefikFrontendAuthForwardAuthResponseHeaders), - TrustForwardHeader: GetBoolValue(labels, TraefikFrontendAuthForwardTrustForwardHeader, false), - } - - // TLS configuration - if HasPrefix(labels, TraefikFrontendAuthForwardTLS) { - forwardAuth.TLS = &types.ClientTLS{ - CA: GetStringValue(labels, TraefikFrontendAuthForwardTLSCa, ""), - CAOptional: GetBoolValue(labels, TraefikFrontendAuthForwardTLSCaOptional, false), - Cert: GetStringValue(labels, TraefikFrontendAuthForwardTLSCert, ""), - InsecureSkipVerify: GetBoolValue(labels, TraefikFrontendAuthForwardTLSInsecureSkipVerify, false), - Key: GetStringValue(labels, TraefikFrontendAuthForwardTLSKey, ""), - } - } - - return forwardAuth -} - -// GetErrorPages Create error pages from labels -func GetErrorPages(labels map[string]string) map[string]*types.ErrorPage { - prefix := Prefix + BaseFrontendErrorPage - return ParseErrorPages(labels, prefix, RegexpFrontendErrorPage) -} - -// ParseErrorPages parse error pages to create ErrorPage struct -func ParseErrorPages(labels map[string]string, labelPrefix string, labelRegex *regexp.Regexp) map[string]*types.ErrorPage { - var errorPages map[string]*types.ErrorPage - - for lblName, value := range labels { - if strings.HasPrefix(lblName, labelPrefix) { - submatch := labelRegex.FindStringSubmatch(lblName) - if len(submatch) != 3 { - log.Errorf("Invalid page error label: %s, sub-match: %v", lblName, submatch) - continue - } - - if errorPages == nil { - errorPages = make(map[string]*types.ErrorPage) - } - - pageName := submatch[1] - - ep, ok := errorPages[pageName] - if !ok { - ep = &types.ErrorPage{} - errorPages[pageName] = ep - } - - switch submatch[2] { - case SuffixErrorPageStatus: - ep.Status = SplitAndTrimString(value, ",") - case SuffixErrorPageQuery: - ep.Query = value - case SuffixErrorPageBackend: - ep.Backend = value - default: - log.Errorf("Invalid page error label: %s", lblName) - continue - } - } - } - - return errorPages -} - -// GetRateLimit Create rate limits from labels -func GetRateLimit(labels map[string]string) *types.RateLimit { - extractorFunc := GetStringValue(labels, TraefikFrontendRateLimitExtractorFunc, "") - if len(extractorFunc) == 0 { - return nil - } - - prefix := Prefix + BaseFrontendRateLimit - limits := ParseRateSets(labels, prefix, RegexpFrontendRateLimit) - - return &types.RateLimit{ - ExtractorFunc: extractorFunc, - RateSet: limits, - } -} - -// ParseRateSets parse rate limits to create Rate struct -func ParseRateSets(labels map[string]string, labelPrefix string, labelRegex *regexp.Regexp) map[string]*types.Rate { - var rateSets map[string]*types.Rate - - for lblName, rawValue := range labels { - if strings.HasPrefix(lblName, labelPrefix) && 
len(rawValue) > 0 { - submatch := labelRegex.FindStringSubmatch(lblName) - if len(submatch) != 3 { - log.Errorf("Invalid rate limit label: %s, sub-match: %v", lblName, submatch) - continue - } - - if rateSets == nil { - rateSets = make(map[string]*types.Rate) - } - - limitName := submatch[1] - - ep, ok := rateSets[limitName] - if !ok { - ep = &types.Rate{} - rateSets[limitName] = ep - } - - switch submatch[2] { - case "period": - var d parse.Duration - err := d.Set(rawValue) - if err != nil { - log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err) - continue - } - ep.Period = d - case "average": - value, err := strconv.ParseInt(rawValue, 10, 64) - if err != nil { - log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err) - continue - } - ep.Average = value - case "burst": - value, err := strconv.ParseInt(rawValue, 10, 64) - if err != nil { - log.Errorf("Unable to parse %q: %q. %v", lblName, rawValue, err) - continue - } - ep.Burst = value - default: - log.Errorf("Invalid rate limit label: %s", lblName) - continue - } - } - } - return rateSets -} - -// GetHeaders Create headers from labels -func GetHeaders(labels map[string]string) *types.Headers { - headers := &types.Headers{ - CustomRequestHeaders: GetMapValue(labels, TraefikFrontendRequestHeaders), - CustomResponseHeaders: GetMapValue(labels, TraefikFrontendResponseHeaders), - SSLProxyHeaders: GetMapValue(labels, TraefikFrontendSSLProxyHeaders), - AllowedHosts: GetSliceStringValue(labels, TraefikFrontendAllowedHosts), - HostsProxyHeaders: GetSliceStringValue(labels, TraefikFrontendHostsProxyHeaders), - STSSeconds: GetInt64Value(labels, TraefikFrontendSTSSeconds, 0), - SSLRedirect: GetBoolValue(labels, TraefikFrontendSSLRedirect, false), - SSLTemporaryRedirect: GetBoolValue(labels, TraefikFrontendSSLTemporaryRedirect, false), - SSLForceHost: GetBoolValue(labels, TraefikFrontendSSLForceHost, false), - STSIncludeSubdomains: GetBoolValue(labels, TraefikFrontendSTSIncludeSubdomains, false), - STSPreload: GetBoolValue(labels, TraefikFrontendSTSPreload, false), - ForceSTSHeader: GetBoolValue(labels, TraefikFrontendForceSTSHeader, false), - FrameDeny: GetBoolValue(labels, TraefikFrontendFrameDeny, false), - ContentTypeNosniff: GetBoolValue(labels, TraefikFrontendContentTypeNosniff, false), - BrowserXSSFilter: GetBoolValue(labels, TraefikFrontendBrowserXSSFilter, false), - IsDevelopment: GetBoolValue(labels, TraefikFrontendIsDevelopment, false), - SSLHost: GetStringValue(labels, TraefikFrontendSSLHost, ""), - CustomFrameOptionsValue: GetStringValue(labels, TraefikFrontendCustomFrameOptionsValue, ""), - ContentSecurityPolicy: GetStringValue(labels, TraefikFrontendContentSecurityPolicy, ""), - PublicKey: GetStringValue(labels, TraefikFrontendPublicKey, ""), - ReferrerPolicy: GetStringValue(labels, TraefikFrontendReferrerPolicy, ""), - CustomBrowserXSSValue: GetStringValue(labels, TraefikFrontendCustomBrowserXSSValue, ""), - } - - if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() { - return nil - } - - return headers -} - -// GetMaxConn Create max connection from labels -func GetMaxConn(labels map[string]string) *types.MaxConn { - amount := GetInt64Value(labels, TraefikBackendMaxConnAmount, math.MinInt64) - extractorFunc := GetStringValue(labels, TraefikBackendMaxConnExtractorFunc, DefaultBackendMaxconnExtractorFunc) - - if amount == math.MinInt64 || len(extractorFunc) == 0 { - return nil - } - - return &types.MaxConn{ - Amount: amount, - ExtractorFunc: extractorFunc, - } -} - -// GetHealthCheck Create 
health check from labels -func GetHealthCheck(labels map[string]string) *types.HealthCheck { - path := GetStringValue(labels, TraefikBackendHealthCheckPath, "") - if len(path) == 0 { - return nil - } - - scheme := GetStringValue(labels, TraefikBackendHealthCheckScheme, "") - port := GetIntValue(labels, TraefikBackendHealthCheckPort, DefaultBackendHealthCheckPort) - interval := GetStringValue(labels, TraefikBackendHealthCheckInterval, "") - timeout := GetStringValue(labels, TraefikBackendHealthCheckTimeout, "") - hostname := GetStringValue(labels, TraefikBackendHealthCheckHostname, "") - headers := GetMapValue(labels, TraefikBackendHealthCheckHeaders) - - return &types.HealthCheck{ - Scheme: scheme, - Path: path, - Port: port, - Interval: interval, - Timeout: timeout, - Hostname: hostname, - Headers: headers, - } -} - -// GetResponseForwarding Create ResponseForwarding from labels -func GetResponseForwarding(labels map[string]string) *types.ResponseForwarding { - if !HasPrefix(labels, TraefikBackendResponseForwardingFlushInterval) { - return nil - } - - value := GetStringValue(labels, TraefikBackendResponseForwardingFlushInterval, "0") - - return &types.ResponseForwarding{ - FlushInterval: value, - } -} - -// GetBuffering Create buffering from labels -func GetBuffering(labels map[string]string) *types.Buffering { - if !HasPrefix(labels, TraefikBackendBuffering) { - return nil - } - - return &types.Buffering{ - MaxRequestBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMaxRequestBodyBytes, 0), - MaxResponseBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMaxResponseBodyBytes, 0), - MemRequestBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMemRequestBodyBytes, 0), - MemResponseBodyBytes: GetInt64Value(labels, TraefikBackendBufferingMemResponseBodyBytes, 0), - RetryExpression: GetStringValue(labels, TraefikBackendBufferingRetryExpression, ""), - } -} - -// GetCircuitBreaker Create circuit breaker from labels -func GetCircuitBreaker(labels map[string]string) *types.CircuitBreaker { - circuitBreaker := GetStringValue(labels, TraefikBackendCircuitBreakerExpression, "") - if len(circuitBreaker) == 0 { - return nil - } - return &types.CircuitBreaker{Expression: circuitBreaker} -} - -// GetLoadBalancer Create load balancer from labels -func GetLoadBalancer(labels map[string]string) *types.LoadBalancer { - if !HasPrefix(labels, TraefikBackendLoadBalancer) { - return nil - } - - method := GetStringValue(labels, TraefikBackendLoadBalancerMethod, DefaultBackendLoadBalancerMethod) - - lb := &types.LoadBalancer{ - Method: method, - } - - if GetBoolValue(labels, TraefikBackendLoadBalancerStickiness, false) { - cookieName := GetStringValue(labels, TraefikBackendLoadBalancerStickinessCookieName, DefaultBackendLoadbalancerStickinessCookieName) - lb.Stickiness = &types.Stickiness{CookieName: cookieName} - } - - return lb -} diff --git a/old/provider/label/partial_test.go b/old/provider/label/partial_test.go deleted file mode 100644 index 2ddc08b0c..000000000 --- a/old/provider/label/partial_test.go +++ /dev/null @@ -1,1014 +0,0 @@ -package label - -import ( - "testing" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" -) - -func TestParseErrorPages(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected map[string]*types.ErrorPage - }{ - { - desc: "2 errors pages", - labels: map[string]string{ - Prefix + BaseFrontendErrorPage + "foo." 
+ SuffixErrorPageStatus: "404", - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageBackend: "foo_backend", - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageQuery: "foo_query", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageStatus: "500,600", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageBackend: "bar_backend", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageQuery: "bar_query", - }, - expected: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "foo_backend", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "bar_backend", - }, - }, - }, - { - desc: "only status field", - labels: map[string]string{ - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageStatus: "404", - }, - expected: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - }, - }, - }, - { - desc: "invalid field", - labels: map[string]string{ - Prefix + BaseFrontendErrorPage + "foo." + "courgette": "404", - }, - expected: map[string]*types.ErrorPage{"foo": {}}, - }, - { - desc: "no error pages labels", - labels: map[string]string{}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - pages := ParseErrorPages(test.labels, Prefix+BaseFrontendErrorPage, RegexpFrontendErrorPage) - - assert.EqualValues(t, test.expected, pages) - }) - } -} - -func TestParseRateSets(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected map[string]*types.Rate - }{ - { - desc: "2 rate limits", - labels: map[string]string{ - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6", - Prefix + BaseFrontendRateLimit + "bar." 
+ SuffixRateLimitBurst: "9", - }, - expected: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - { - desc: "no rate limits labels", - labels: map[string]string{}, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - rateSets := ParseRateSets(test.labels, Prefix+BaseFrontendRateLimit, RegexpFrontendRateLimit) - - assert.EqualValues(t, test.expected, rateSets) - }) - } -} - -func TestWhiteList(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.WhiteList - }{ - { - desc: "should return nil when no white list labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when only range", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "10.10.10.10", - }, - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - }, - }, - { - desc: "should return a struct with ip strategy depth", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "10.10.10.10", - TraefikFrontendWhiteListIPStrategyDepth: "5", - }, - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 5, - }, - }, - }, - { - desc: "should return a struct with ip strategy depth and excluded ips", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "10.10.10.10", - TraefikFrontendWhiteListIPStrategyDepth: "5", - TraefikFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11", - }, - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{ - "10.10.10.10", - "10.10.10.11", - }, - }, - }, - }, - { - desc: "should return a struct with ip strategy (remoteAddr) with no depth and no excludedIPs", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "10.10.10.10", - TraefikFrontendWhiteListIPStrategy: "true", - }, - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 0, - ExcludedIPs: nil, - }, - }, - }, - { - desc: "should return a struct with ip strategy with depth", - labels: map[string]string{ - TraefikFrontendWhiteListSourceRange: "10.10.10.10", - TraefikFrontendWhiteListIPStrategy: "true", - TraefikFrontendWhiteListIPStrategyDepth: "5", - }, - expected: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: nil, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetWhiteList(test.labels) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetCircuitBreaker(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.CircuitBreaker - }{ - { - desc: "should return nil when no CB label", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when CB label is set", - labels: map[string]string{ - TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5", - }, - expected: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := 
GetCircuitBreaker(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetLoadBalancer(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.LoadBalancer - }{ - { - desc: "should return nil when no LB labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when labels are set", - labels: map[string]string{ - TraefikBackendLoadBalancerMethod: "drr", - TraefikBackendLoadBalancerStickiness: "true", - TraefikBackendLoadBalancerStickinessCookieName: "foo", - }, - expected: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "foo", - }, - }, - }, - { - desc: "should return a nil Stickiness when Stickiness is not set", - labels: map[string]string{ - TraefikBackendLoadBalancerMethod: "drr", - TraefikBackendLoadBalancerStickinessCookieName: "foo", - }, - expected: &types.LoadBalancer{ - Method: "drr", - Stickiness: nil, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetLoadBalancer(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetMaxConn(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.MaxConn - }{ - { - desc: "should return nil when no max conn labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return nil when no amount label", - labels: map[string]string{ - TraefikBackendMaxConnExtractorFunc: "client.ip", - }, - expected: nil, - }, - { - desc: "should return default when no empty extractorFunc label", - labels: map[string]string{ - TraefikBackendMaxConnExtractorFunc: "", - TraefikBackendMaxConnAmount: "666", - }, - expected: &types.MaxConn{ - ExtractorFunc: "request.host", - Amount: 666, - }, - }, - { - desc: "should return a struct when max conn labels are set", - labels: map[string]string{ - TraefikBackendMaxConnExtractorFunc: "client.ip", - TraefikBackendMaxConnAmount: "666", - }, - expected: &types.MaxConn{ - ExtractorFunc: "client.ip", - Amount: 666, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetMaxConn(test.labels) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetHealthCheck(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.HealthCheck - }{ - { - desc: "should return nil when no health check labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return nil when no health check Path label", - labels: map[string]string{ - TraefikBackendHealthCheckPort: "80", - TraefikBackendHealthCheckInterval: "6", - TraefikBackendHealthCheckTimeout: "3", - }, - expected: nil, - }, - { - desc: "should return a struct when health check labels are set", - labels: map[string]string{ - TraefikBackendHealthCheckPath: "/health", - TraefikBackendHealthCheckPort: "80", - TraefikBackendHealthCheckInterval: "6", - TraefikBackendHealthCheckTimeout: "3", - TraefikBackendHealthCheckHeaders: "Foo:bar || Goo:bir", - TraefikBackendHealthCheckHostname: "traefik", - TraefikBackendHealthCheckScheme: "http", - }, - expected: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 80, - Interval: "6", - Timeout: "3", - Hostname: "traefik", - Headers: map[string]string{ - "Foo": "bar", - "Goo": "bir", - }, - }, - }, - } - - for _, test := range testCases { - test := test - 
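All of the removed tests in this file follow the same table-driven shape: a slice of cases, a per-iteration copy of the loop variable, and a parallel subtest. The copy (test := test) is what keeps each parallel closure bound to its own case on the Go versions contemporary with this code (before Go 1.22's per-iteration loop variables). A stripped-down skeleton of the pattern, with placeholder names:

// Save as e.g. pattern_test.go and run with `go test`.
package main

import "testing"

func TestTableDrivenPattern(t *testing.T) {
    testCases := []struct {
        desc string
        in   int
        want int
    }{
        {desc: "doubles one", in: 1, want: 2},
        {desc: "doubles two", in: 2, want: 4},
    }

    for _, test := range testCases {
        test := test // pin the current case for the parallel closure
        t.Run(test.desc, func(t *testing.T) {
            t.Parallel()
            if got := test.in * 2; got != test.want {
                t.Errorf("got %d, want %d", got, test.want)
            }
        })
    }
}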
t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetHealthCheck(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetBuffering(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.Buffering - }{ - { - desc: "should return nil when no buffering labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when buffering labels are set", - labels: map[string]string{ - TraefikBackendBufferingMaxResponseBodyBytes: "10485760", - TraefikBackendBufferingMemResponseBodyBytes: "2097152", - TraefikBackendBufferingMaxRequestBodyBytes: "10485760", - TraefikBackendBufferingMemRequestBodyBytes: "2097152", - TraefikBackendBufferingRetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - expected: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetBuffering(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetRedirect(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.Redirect - }{ - - { - desc: "should return nil when no redirect labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should use only entry point tag when mix regex redirect and entry point redirect", - labels: map[string]string{ - TraefikFrontendRedirectEntryPoint: "https", - TraefikFrontendRedirectRegex: "(.*)", - TraefikFrontendRedirectReplacement: "$1", - }, - expected: &types.Redirect{ - EntryPoint: "https", - }, - }, - { - desc: "should return a struct when entry point redirect label", - labels: map[string]string{ - TraefikFrontendRedirectEntryPoint: "https", - }, - expected: &types.Redirect{ - EntryPoint: "https", - }, - }, - { - desc: "should return a struct when entry point redirect label (permanent)", - labels: map[string]string{ - TraefikFrontendRedirectEntryPoint: "https", - TraefikFrontendRedirectPermanent: "true", - }, - expected: &types.Redirect{ - EntryPoint: "https", - Permanent: true, - }, - }, - { - desc: "should return a struct when regex redirect labels", - labels: map[string]string{ - TraefikFrontendRedirectRegex: "(.*)", - TraefikFrontendRedirectReplacement: "$1", - }, - expected: &types.Redirect{ - Regex: "(.*)", - Replacement: "$1", - }, - }, - { - desc: "should return a struct when regex redirect labels (permanent)", - labels: map[string]string{ - TraefikFrontendRedirectRegex: "(.*)", - TraefikFrontendRedirectReplacement: "$1", - TraefikFrontendRedirectPermanent: "true", - }, - expected: &types.Redirect{ - Regex: "(.*)", - Replacement: "$1", - Permanent: true, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetRedirect(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetRateLimit(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.RateLimit - }{ - { - desc: "should return nil when no rate limit labels", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when rate limit labels are defined", - labels: map[string]string{ - TraefikFrontendRateLimitExtractorFunc: "client.ip", - Prefix + 
BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitBurst: "9", - }, - expected: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - }, - { - desc: "should return nil when ExtractorFunc is missing", - labels: map[string]string{ - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitPeriod: "6", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitAverage: "12", - Prefix + BaseFrontendRateLimit + "foo." + SuffixRateLimitBurst: "18", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitPeriod: "3", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitAverage: "6", - Prefix + BaseFrontendRateLimit + "bar." + SuffixRateLimitBurst: "9", - }, - expected: nil, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetRateLimit(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetHeaders(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.Headers - }{ - { - desc: "should return nil when no custom headers options are set", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a struct when all custom headers options are set", - labels: map[string]string{ - TraefikFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - TraefikFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - TraefikFrontendSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - TraefikFrontendAllowedHosts: "foo,bar,bor", - TraefikFrontendHostsProxyHeaders: "foo,bar,bor", - TraefikFrontendSSLHost: "foo", - TraefikFrontendCustomFrameOptionsValue: "foo", - TraefikFrontendContentSecurityPolicy: "foo", - TraefikFrontendPublicKey: "foo", - TraefikFrontendReferrerPolicy: "foo", - TraefikFrontendCustomBrowserXSSValue: "foo", - TraefikFrontendSTSSeconds: "666", - TraefikFrontendSSLRedirect: "true", - TraefikFrontendSSLForceHost: "true", - TraefikFrontendSSLTemporaryRedirect: "true", - TraefikFrontendSTSIncludeSubdomains: "true", - TraefikFrontendSTSPreload: "true", - TraefikFrontendForceSTSHeader: "true", - TraefikFrontendFrameDeny: "true", - TraefikFrontendContentTypeNosniff: "true", - TraefikFrontendBrowserXSSFilter: "true", - TraefikFrontendIsDevelopment: "true", - }, - expected: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: 
[]string{"foo", "bar", "bor"}, - HostsProxyHeaders: []string{"foo", "bar", "bor"}, - SSLHost: "foo", - CustomFrameOptionsValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - CustomBrowserXSSValue: "foo", - STSSeconds: 666, - SSLForceHost: true, - SSLRedirect: true, - SSLTemporaryRedirect: true, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - ContentTypeNosniff: true, - BrowserXSSFilter: true, - IsDevelopment: true, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := GetHeaders(test.labels) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetErrorPages(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected map[string]*types.ErrorPage - }{ - { - desc: "should return nil when no tags", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a map when tags are present", - labels: map[string]string{ - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageStatus: "404", - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageBackend: "foo_backend", - Prefix + BaseFrontendErrorPage + "foo." + SuffixErrorPageQuery: "foo_query", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageStatus: "500,600", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageBackend: "bar_backend", - Prefix + BaseFrontendErrorPage + "bar." + SuffixErrorPageQuery: "bar_query", - }, - expected: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "foo_backend", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "bar_backend", - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - result := GetErrorPages(test.labels) - - assert.Equal(t, test.expected, result) - }) - } -} - -func TestGetAuth(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.Auth - }{ - { - desc: "should return nil when no tags", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return a basic auth", - labels: map[string]string{ - TraefikFrontendAuthHeaderField: "myHeaderField", - TraefikFrontendAuthBasicRealm: "myRealm", - TraefikFrontendAuthBasicUsers: "user:pwd,user2:pwd2", - TraefikFrontendAuthBasicUsersFile: "myUsersFile", - TraefikFrontendAuthBasicRemoveHeader: "true", - }, - expected: &types.Auth{ - HeaderField: "myHeaderField", - Basic: &types.Basic{UsersFile: "myUsersFile", Users: []string{"user:pwd", "user2:pwd2"}, RemoveHeader: true, Realm: "myRealm"}, - }, - }, - { - desc: "should return a digest auth", - labels: map[string]string{ - TraefikFrontendAuthDigestRemoveHeader: "true", - TraefikFrontendAuthHeaderField: "myHeaderField", - TraefikFrontendAuthDigestUsers: "user:pwd,user2:pwd2", - TraefikFrontendAuthDigestUsersFile: "myUsersFile", - }, - expected: &types.Auth{ - HeaderField: "myHeaderField", - Digest: &types.Digest{UsersFile: "myUsersFile", Users: []string{"user:pwd", "user2:pwd2"}, RemoveHeader: true}, - }, - }, - { - desc: "should return a forward auth", - labels: map[string]string{ - TraefikFrontendAuthHeaderField: "myHeaderField", - TraefikFrontendAuthForwardAddress: "myAddress", - TraefikFrontendAuthForwardTrustForwardHeader: "true", - TraefikFrontendAuthForwardTLSCa: "ca.crt", - TraefikFrontendAuthForwardTLSCaOptional: "true", - 
TraefikFrontendAuthForwardTLSInsecureSkipVerify: "true", - TraefikFrontendAuthForwardTLSKey: "myKey", - TraefikFrontendAuthForwardTLSCert: "myCert", - }, - expected: &types.Auth{ - HeaderField: "myHeaderField", - Forward: &types.Forward{ - TrustForwardHeader: true, - Address: "myAddress", - TLS: &types.ClientTLS{ - InsecureSkipVerify: true, - CA: "ca.crt", - CAOptional: true, - Key: "myKey", - Cert: "myCert", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - result := GetAuth(test.labels) - - assert.Equal(t, test.expected, result) - }) - } -} -func TestGetPassTLSClientCert(t *testing.T) { - testCases := []struct { - desc string - labels map[string]string - expected *types.TLSClientHeaders - }{ - { - desc: "should return nil when no tags", - labels: map[string]string{}, - expected: nil, - }, - { - desc: "should return tlsClientHeaders with true pem flag", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertPem: "true", - }, - expected: &types.TLSClientHeaders{ - PEM: true, - }, - }, - { - desc: "should return tlsClientHeaders with infos and NotAfter true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosNotAfter: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotAfter: true, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and NotBefore true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosNotBefore: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and sans true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSans: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Sans: true, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with commonName true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectCommonName: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with country true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectCountry: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Country: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with locality true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectLocality: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Locality: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with organization true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectOrganization: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Organization: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with province true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectProvince: "true", - }, - expected: 
&types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - Province: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with infos and subject with serialNumber true", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: "true", - }, - expected: &types.TLSClientHeaders{ - Infos: &types.TLSClientCertificateInfos{ - Subject: &types.TLSCLientCertificateDNInfos{ - SerialNumber: true, - }, - }, - }, - }, - { - desc: "should return tlsClientHeaders with all infos", - labels: map[string]string{ - TraefikFrontendPassTLSClientCertPem: "true", - TraefikFrontendPassTLSClientCertInfosNotAfter: "true", - TraefikFrontendPassTLSClientCertInfosNotBefore: "true", - TraefikFrontendPassTLSClientCertInfosSans: "true", - TraefikFrontendPassTLSClientCertInfosIssuerCommonName: "true", - TraefikFrontendPassTLSClientCertInfosIssuerCountry: "true", - TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent: "true", - TraefikFrontendPassTLSClientCertInfosIssuerLocality: "true", - TraefikFrontendPassTLSClientCertInfosIssuerOrganization: "true", - TraefikFrontendPassTLSClientCertInfosIssuerProvince: "true", - TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber: "true", - TraefikFrontendPassTLSClientCertInfosSubjectCommonName: "true", - TraefikFrontendPassTLSClientCertInfosSubjectCountry: "true", - TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent: "true", - TraefikFrontendPassTLSClientCertInfosSubjectLocality: "true", - TraefikFrontendPassTLSClientCertInfosSubjectOrganization: "true", - TraefikFrontendPassTLSClientCertInfosSubjectProvince: "true", - TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: "true", - }, - expected: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - Sans: true, - NotBefore: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - result := GetTLSClientCert(test.labels) - - assert.Equal(t, test.expected, result) - }) - } -} diff --git a/old/provider/label/segment.go b/old/provider/label/segment.go deleted file mode 100644 index 06a4cd85b..000000000 --- a/old/provider/label/segment.go +++ /dev/null @@ -1,115 +0,0 @@ -package label - -import ( - "regexp" - "strings" - - "github.com/containous/traefik/old/log" -) - -var ( - // SegmentPropertiesRegexp used to extract the name of the segment and the name of the property for this segment - // All properties are under the format traefik..frontend.*= except the port/portIndex/weight/protocol/backend directly after traefik.. 
- SegmentPropertiesRegexp = regexp.MustCompile(`^traefik\.(?P.+?)\.(?Pport|portIndex|portName|weight|protocol|backend|frontend\.(.+))$`) - - // PortRegexp used to extract the port label of the segment - PortRegexp = regexp.MustCompile(`^traefik\.(?P.+?)\.port$`) -) - -// SegmentPropertyValues is a map of segment properties -// an example value is: weight=42 -type SegmentPropertyValues map[string]string - -// SegmentProperties is a map of segment properties per segment, -// which we can get with label[segmentName][propertyName]. -// It yields a property value. -type SegmentProperties map[string]SegmentPropertyValues - -// FindSegmentSubmatch split segment labels -func FindSegmentSubmatch(name string) []string { - matches := SegmentPropertiesRegexp.FindStringSubmatch(name) - if matches == nil || - strings.HasPrefix(name, TraefikFrontend+".") || - strings.HasPrefix(name, TraefikBackend+".") { - return nil - } - return matches -} - -// ExtractTraefikLabels transform labels to segment labels -func ExtractTraefikLabels(originLabels map[string]string) SegmentProperties { - allLabels := make(SegmentProperties) - - if _, ok := allLabels[""]; !ok { - allLabels[""] = make(SegmentPropertyValues) - } - - for name, value := range originLabels { - if !strings.HasPrefix(name, Prefix) { - continue - } - - matches := FindSegmentSubmatch(name) - if matches == nil { - // Classic labels - allLabels[""][name] = value - } else { - // segments labels - var segmentName string - var propertyName string - for i, name := range SegmentPropertiesRegexp.SubexpNames() { - // the group 0 is anonymous because it's always the root expression - if i != 0 { - if name == "segment_name" { - segmentName = matches[i] - } else if name == "property_name" { - propertyName = matches[i] - } - } - } - - if _, ok := allLabels[segmentName]; !ok { - allLabels[segmentName] = make(SegmentPropertyValues) - } - allLabels[segmentName][Prefix+propertyName] = value - } - } - - log.Debug("originLabels", originLabels) - log.Debug("allLabels", allLabels) - - allLabels.mergeDefault() - - return allLabels -} - -func (s SegmentProperties) mergeDefault() { - // if SegmentProperties contains the default segment, merge each segments with the default segment - if defaultLabels, okDefault := s[""]; okDefault { - - segmentsNames := s.GetSegmentNames() - if len(defaultLabels) > 0 { - for _, name := range segmentsNames { - segmentLabels := s[name] - for key, value := range defaultLabels { - if _, ok := segmentLabels[key]; !ok { - segmentLabels[key] = value - } - } - } - } - - if len(segmentsNames) > 1 { - delete(s, "") - } - } -} - -// GetSegmentNames get all segment names -func (s SegmentProperties) GetSegmentNames() []string { - var names []string - for name := range s { - names = append(names, name) - } - return names -} diff --git a/old/provider/label/segment_test.go b/old/provider/label/segment_test.go deleted file mode 100644 index 96d123ecb..000000000 --- a/old/provider/label/segment_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package label - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestExtractTraefikLabels(t *testing.T) { - testCases := []struct { - desc string - prefix string - originLabels map[string]string - expected SegmentProperties - }{ - { - desc: "nil labels map", - prefix: "traefik", - originLabels: nil, - expected: SegmentProperties{"": {}}, - }, - { - desc: "container labels", - prefix: "traefik", - originLabels: map[string]string{ - "frontend.priority": "foo", // missing prefix: skip - "traefik.port": "bar", - 
}, - expected: SegmentProperties{ - "": { - "traefik.port": "bar", - }, - }, - }, - { - desc: "segment labels: only segment no default", - prefix: "traefik", - originLabels: map[string]string{ - "traefik.goo.frontend.priority": "A", - "traefik.goo.port": "D", - "traefik.port": "C", - }, - expected: SegmentProperties{ - "goo": { - "traefik.frontend.priority": "A", - "traefik.port": "D", - }, - }, - }, - { - desc: "segment labels: use default", - prefix: "traefik", - originLabels: map[string]string{ - "traefik.guu.frontend.priority": "B", - "traefik.port": "C", - }, - expected: SegmentProperties{ - "guu": { - "traefik.frontend.priority": "B", - "traefik.port": "C", - }, - }, - }, - { - desc: "segment labels: several segments", - prefix: "traefik", - originLabels: map[string]string{ - "traefik.goo.frontend.priority": "A", - "traefik.goo.port": "D", - "traefik.guu.frontend.priority": "B", - "traefik.port": "C", - }, - expected: SegmentProperties{ - "goo": { - "traefik.frontend.priority": "A", - "traefik.port": "D", - }, - "guu": { - "traefik.frontend.priority": "B", - "traefik.port": "C", - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := ExtractTraefikLabels(test.originLabels) - assert.Equal(t, test.expected, actual) - }) - } -} diff --git a/old/provider/mesos/config.go b/old/provider/mesos/config.go deleted file mode 100644 index 71b2325d6..000000000 --- a/old/provider/mesos/config.go +++ /dev/null @@ -1,310 +0,0 @@ -package mesos - -import ( - "fmt" - "math" - "net" - "strconv" - "strings" - "text/template" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/mesosphere/mesos-dns/records/state" -) - -type taskData struct { - state.Task - TraefikLabels map[string]string - SegmentName string -} - -func (p *Provider) buildConfiguration(tasks []state.Task) *types.Configuration { - var mesosFuncMap = template.FuncMap{ - "getDomain": label.GetFuncString(label.TraefikDomain, p.Domain), - "getSubDomain": p.getSubDomain, - "getSegmentSubDomain": p.getSegmentSubDomain, - "getID": getID, - - // Backend functions - "getBackendName": getBackendName, - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getResponseForwarding": label.GetResponseForwarding, - "getServers": p.getServers, - "getHost": p.getHost, - "getServerPort": p.getServerPort, - - // Frontend functions - "getSegmentNameSuffix": getSegmentNameSuffix, - "getFrontEndName": getFrontendName, - "getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints), - "getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated - "getAuth": label.GetAuth, - "getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority), - "getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert), - "getPassTLSClientCert": label.GetTLSClientCert, - "getFrontendRule": p.getFrontendRule, - "getRedirect": label.GetRedirect, - "getErrorPages": label.GetErrorPages, - "getRateLimit": label.GetRateLimit, - "getHeaders": label.GetHeaders, - "getWhiteList": label.GetWhiteList, - 
} - - appsTasks := p.filterTasks(tasks) - - templateObjects := struct { - ApplicationsTasks map[string][]taskData - Domain string - }{ - ApplicationsTasks: appsTasks, - Domain: p.Domain, - } - - configuration, err := p.GetConfiguration("templates/mesos.tmpl", mesosFuncMap, templateObjects) - if err != nil { - log.Error(err) - } - - return configuration -} - -func (p *Provider) filterTasks(tasks []state.Task) map[string][]taskData { - appsTasks := make(map[string][]taskData) - - for _, task := range tasks { - taskLabels := label.ExtractTraefikLabels(extractLabels(task)) - for segmentName, traefikLabels := range taskLabels { - data := taskData{ - Task: task, - TraefikLabels: traefikLabels, - SegmentName: segmentName, - } - - if taskFilter(data, p.ExposedByDefault) { - name := getName(data) - if _, ok := appsTasks[name]; !ok { - appsTasks[name] = []taskData{data} - } else { - appsTasks[name] = append(appsTasks[name], data) - } - } - } - } - - return appsTasks -} - -func taskFilter(task taskData, exposedByDefaultFlag bool) bool { - name := getName(task) - - if len(task.DiscoveryInfo.Ports.DiscoveryPorts) == 0 { - log.Debugf("Filtering Mesos task without port %s", name) - return false - } - if !isEnabled(task, exposedByDefaultFlag) { - log.Debugf("Filtering disabled Mesos task %s", name) - return false - } - - // filter indeterminable task port - portIndexLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPortIndex, "") - portNameLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPortName, "") - portValueLabel := label.GetStringValue(task.TraefikLabels, label.TraefikPort, "") - if portIndexLabel != "" && portValueLabel != "" { - log.Debugf("Filtering Mesos task %s specifying both %q' and %q labels", task.Name, label.TraefikPortIndex, label.TraefikPort) - return false - } - if portIndexLabel != "" { - index, err := strconv.Atoi(portIndexLabel) - if err != nil || index < 0 || index > len(task.DiscoveryInfo.Ports.DiscoveryPorts)-1 { - log.Debugf("Filtering Mesos task %s with unexpected value for %q label", task.Name, label.TraefikPortIndex) - return false - } - } - if portValueLabel != "" { - port, err := strconv.Atoi(portValueLabel) - if err != nil { - log.Debugf("Filtering Mesos task %s with unexpected value for %q label", task.Name, label.TraefikPort) - return false - } - - var foundPort bool - for _, exposedPort := range task.DiscoveryInfo.Ports.DiscoveryPorts { - if port == exposedPort.Number { - foundPort = true - break - } - } - - if !foundPort { - log.Debugf("Filtering Mesos task %s without a matching port for %q label", task.Name, label.TraefikPort) - return false - } - } - if portNameLabel != "" { - var foundPort bool - for _, exposedPort := range task.DiscoveryInfo.Ports.DiscoveryPorts { - if portNameLabel == exposedPort.Name { - foundPort = true - break - } - } - - if !foundPort { - log.Debugf("Filtering Mesos task %s without a matching port for %q label", task.Name, label.TraefikPortName) - return false - } - } - - // filter healthChecks - if task.Statuses != nil && len(task.Statuses) > 0 && task.Statuses[0].Healthy != nil && !*task.Statuses[0].Healthy { - log.Debugf("Filtering Mesos task %s with bad healthCheck", name) - return false - - } - return true -} - -func getID(task taskData) string { - return provider.Normalize(task.ID + getSegmentNameSuffix(task.SegmentName)) -} - -func getName(task taskData) string { - return provider.Normalize(task.DiscoveryInfo.Name + getSegmentNameSuffix(task.SegmentName)) -} - -func getBackendName(task taskData) string { - 
return label.GetStringValue(task.TraefikLabels, label.TraefikBackend, getName(task)) -} - -func getFrontendName(task taskData) string { - // TODO task.ID -> task.Name + task.ID - return provider.Normalize(task.ID + getSegmentNameSuffix(task.SegmentName)) -} - -func getSegmentNameSuffix(serviceName string) string { - if len(serviceName) > 0 { - return "-service-" + provider.Normalize(serviceName) - } - return "" -} - -func (p *Provider) getSubDomain(name string) string { - if p.GroupsAsSubDomains { - splitedName := strings.Split(strings.TrimPrefix(name, "/"), "/") - provider.ReverseStringSlice(&splitedName) - reverseName := strings.Join(splitedName, ".") - return reverseName - } - return strings.Replace(strings.Replace(strings.TrimPrefix(name, "/"), "/", "-", -1), "_", "-", -1) -} - -func (p *Provider) getSegmentSubDomain(task taskData) string { - subDomain := strings.ToLower(p.getSubDomain(task.DiscoveryInfo.Name)) - if len(task.SegmentName) > 0 { - subDomain = strings.ToLower(provider.Normalize(task.SegmentName)) + "." + subDomain - } - return subDomain -} - -// getFrontendRule returns the frontend rule for the specified application, using it's label. -// It returns a default one (Host) if the label is not present. -func (p *Provider) getFrontendRule(task taskData) string { - if v := label.GetStringValue(task.TraefikLabels, label.TraefikFrontendRule, ""); len(v) > 0 { - return v - } - - domain := label.GetStringValue(task.TraefikLabels, label.TraefikDomain, p.Domain) - if len(domain) > 0 { - domain = "." + domain - } - - return "Host:" + p.getSegmentSubDomain(task) + domain -} - -func (p *Provider) getServers(tasks []taskData) map[string]types.Server { - var servers map[string]types.Server - - for _, task := range tasks { - if servers == nil { - servers = make(map[string]types.Server) - } - - protocol := label.GetStringValue(task.TraefikLabels, label.TraefikProtocol, label.DefaultProtocol) - host := p.getHost(task) - port := p.getServerPort(task) - - serverName := "server-" + getID(task) - servers[serverName] = types.Server{ - URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(host, port)), - Weight: getIntValue(task.TraefikLabels, label.TraefikWeight, label.DefaultWeight, math.MaxInt32), - } - } - - return servers -} - -func (p *Provider) getHost(task taskData) string { - return task.IP(strings.Split(p.IPSources, ",")...) 
-} - -func (p *Provider) getServerPort(task taskData) string { - if label.Has(task.TraefikLabels, label.TraefikPort) { - pv := label.GetIntValue(task.TraefikLabels, label.TraefikPort, 0) - if pv <= 0 { - log.Errorf("explicitly specified port %d must be larger than zero", pv) - return "" - } - return strconv.Itoa(pv) - } - - plv := getIntValue(task.TraefikLabels, label.TraefikPortIndex, math.MinInt32, len(task.DiscoveryInfo.Ports.DiscoveryPorts)-1) - if plv >= 0 { - return strconv.Itoa(task.DiscoveryInfo.Ports.DiscoveryPorts[plv].Number) - } - - // Find named port using traefik.portName or the segment name - if pn := label.GetStringValue(task.TraefikLabels, label.TraefikPortName, task.SegmentName); len(pn) > 0 { - for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts { - if pn == port.Name { - return strconv.Itoa(port.Number) - } - } - } - - for _, port := range task.DiscoveryInfo.Ports.DiscoveryPorts { - return strconv.Itoa(port.Number) - } - return "" -} - -func isEnabled(task taskData, exposedByDefault bool) bool { - return label.GetBoolValue(task.TraefikLabels, label.TraefikEnable, exposedByDefault) -} - -// Label functions - -func getIntValue(labels map[string]string, labelName string, defaultValue int, maxValue int) int { - value := label.GetIntValue(labels, labelName, defaultValue) - if value <= maxValue { - return value - } - log.Warnf("The value %d for %s exceed the max authorized value %d, falling back to %d.", value, labelName, maxValue, defaultValue) - return defaultValue -} - -func extractLabels(task state.Task) map[string]string { - labels := make(map[string]string) - for _, lbl := range task.Labels { - labels[lbl.Key] = lbl.Value - } - return labels -} diff --git a/old/provider/mesos/config_segment_test.go b/old/provider/mesos/config_segment_test.go deleted file mode 100644 index 3262b91ee..000000000 --- a/old/provider/mesos/config_segment_test.go +++ /dev/null @@ -1,412 +0,0 @@ -// +build ignore - -package mesos - -import ( - "testing" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/mesosphere/mesos-dns/records/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestBuildConfigurationSegments(t *testing.T) { - p := &Provider{ - Domain: "mesos.localhost", - ExposedByDefault: true, - IPSources: "host", - } - - testCases := []struct { - desc string - tasks []state.Task - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "multiple ports with segments", - tasks: []state.Task{ - aTask("app-taskID", - withIP("127.0.0.1"), - withInfo("/app", - withPorts( - withPort("TCP", 80, "web"), - withPort("TCP", 81, "admin"), - ), - ), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - withLabel(label.TraefikBackendMaxConnAmount, "1000"), - withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"), - withSegmentLabel(label.TraefikPort, "80", "web"), - withSegmentLabel(label.TraefikPort, "81", "admin"), - withLabel("traefik..port", "82"), // This should be ignored, as it fails to match the segmentPropertiesRegexp regex. 
- withSegmentLabel(label.TraefikFrontendRule, "Host:web.app.mesos.localhost", "web"), - withSegmentLabel(label.TraefikFrontendRule, "Host:admin.app.mesos.localhost", "admin"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-app-taskID-service-web": { - Backend: "backend-app-service-web", - Routes: map[string]types.Route{ - `route-host-app-taskID-service-web`: { - Rule: "Host:web.app.mesos.localhost", - }, - }, - PassHostHeader: true, - EntryPoints: []string{}, - }, - "frontend-app-taskID-service-admin": { - Backend: "backend-app-service-admin", - Routes: map[string]types.Route{ - `route-host-app-taskID-service-admin`: { - Rule: "Host:admin.app.mesos.localhost", - }, - }, - PassHostHeader: true, - EntryPoints: []string{}, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-app-service-web": { - Servers: map[string]types.Server{ - "server-app-taskID-service-web": { - URL: "http://127.0.0.1:80", - Weight: label.DefaultWeight, - }, - }, - MaxConn: &types.MaxConn{ - Amount: 1000, - ExtractorFunc: "client.ip", - }, - }, - "backend-app-service-admin": { - Servers: map[string]types.Server{ - "server-app-taskID-service-admin": { - URL: "http://127.0.0.1:81", - Weight: label.DefaultWeight, - }, - }, - MaxConn: &types.MaxConn{ - Amount: 1000, - ExtractorFunc: "client.ip", - }, - }, - }, - }, - { - desc: "when all labels are set", - tasks: []state.Task{ - aTask("app-taskID", - withIP("127.0.0.1"), - withInfo("/app", - withPorts( - withPort("TCP", 80, "web"), - withPort("TCP", 81, "admin"), - ), - ), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - - withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"), - withLabel(label.TraefikBackendHealthCheckScheme, "http"), - withLabel(label.TraefikBackendHealthCheckPath, "/health"), - withLabel(label.TraefikBackendHealthCheckPort, "880"), - withLabel(label.TraefikBackendHealthCheckInterval, "6"), - withLabel(label.TraefikBackendHealthCheckTimeout, "3"), - withLabel(label.TraefikBackendHealthCheckHostname, "foo.com"), - withLabel(label.TraefikBackendHealthCheckHeaders, "Foo:bar || Bar:foo"), - withLabel(label.TraefikBackendLoadBalancerMethod, "drr"), - withLabel(label.TraefikBackendLoadBalancerStickiness, "true"), - withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"), - withLabel(label.TraefikBackendMaxConnAmount, "666"), - withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"), - withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"), - withLabel(label.TraefikBackendBufferingMemResponseBodyBytes, "2097152"), - withLabel(label.TraefikBackendBufferingMaxRequestBodyBytes, "10485760"), - withLabel(label.TraefikBackendBufferingMemRequestBodyBytes, "2097152"), - withLabel(label.TraefikBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"), - - withSegmentLabel(label.TraefikPort, "80", "containous"), - withSegmentLabel(label.TraefikPortName, "web", "containous"), - withSegmentLabel(label.TraefikProtocol, "https", "containous"), - withSegmentLabel(label.TraefikWeight, "12", "containous"), - - withSegmentLabel(label.TraefikFrontendPassTLSClientCertPem, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotBefore, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosNotAfter, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSans, "true", "containous"), - 
withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCountry, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerLocality, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerProvince, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCountry, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectLocality, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectProvince, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, "true", "containous"), - - withSegmentLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"), - withSegmentLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true", "containous"), - withSegmentLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"), - withSegmentLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd", "containous"), - withSegmentLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true", "containous"), - withSegmentLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", "containous"), - withSegmentLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardAddress, "auth.server", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key", "containous"), - withSegmentLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true", "containous"), - withSegmentLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User", "containous"), - - withSegmentLabel(label.TraefikFrontendEntryPoints, "http,https", "containous"), - withSegmentLabel(label.TraefikFrontendPassHostHeader, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPassTLSCert, "true", "containous"), - withSegmentLabel(label.TraefikFrontendPriority, "666", "containous"), - withSegmentLabel(label.TraefikFrontendRedirectEntryPoint, "https", "containous"), - withSegmentLabel(label.TraefikFrontendRedirectRegex, "nope", "containous"), - withSegmentLabel(label.TraefikFrontendRedirectReplacement, "nope", 
"containous"), - withSegmentLabel(label.TraefikFrontendRedirectPermanent, "true", "containous"), - withSegmentLabel(label.TraefikFrontendRule, "Host:traefik.io", "containous"), - withSegmentLabel(label.TraefikFrontendWhiteListSourceRange, "10.10.10.10", "containous"), - - withSegmentLabel(label.TraefikFrontendRequestHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"), - withSegmentLabel(label.TraefikFrontendResponseHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"), - withSegmentLabel(label.TraefikFrontendSSLProxyHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", "containous"), - withSegmentLabel(label.TraefikFrontendAllowedHosts, "foo,bar,bor", "containous"), - withSegmentLabel(label.TraefikFrontendHostsProxyHeaders, "foo,bar,bor", "containous"), - withSegmentLabel(label.TraefikFrontendSSLForceHost, "true", "containous"), - withSegmentLabel(label.TraefikFrontendSSLHost, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendCustomFrameOptionsValue, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendContentSecurityPolicy, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendPublicKey, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendReferrerPolicy, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendCustomBrowserXSSValue, "foo", "containous"), - withSegmentLabel(label.TraefikFrontendSTSSeconds, "666", "containous"), - withSegmentLabel(label.TraefikFrontendSSLRedirect, "true", "containous"), - withSegmentLabel(label.TraefikFrontendSSLTemporaryRedirect, "true", "containous"), - withSegmentLabel(label.TraefikFrontendSTSIncludeSubdomains, "true", "containous"), - withSegmentLabel(label.TraefikFrontendSTSPreload, "true", "containous"), - withSegmentLabel(label.TraefikFrontendForceSTSHeader, "true", "containous"), - withSegmentLabel(label.TraefikFrontendFrameDeny, "true", "containous"), - withSegmentLabel(label.TraefikFrontendContentTypeNosniff, "true", "containous"), - withSegmentLabel(label.TraefikFrontendBrowserXSSFilter, "true", "containous"), - withSegmentLabel(label.TraefikFrontendIsDevelopment, "true", "containous"), - - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageStatus, "404"), - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageBackend, "foobar"), - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageQuery, "foo_query"), - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageStatus, "500,600"), - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageBackend, "foobar"), - withLabel(label.Prefix+"containous."+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageQuery, "bar_query"), - - withSegmentLabel(label.TraefikFrontendRateLimitExtractorFunc, "client.ip", "containous"), - withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitPeriod, "6"), - withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitAverage, "12"), - withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitBurst, "18"), - withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitPeriod, "3"), - 
withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitAverage, "6"), - withLabel(label.Prefix+"containous."+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitBurst, "9"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-app-taskID-service-containous": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-app-service-containous", - Routes: map[string]types.Route{ - "route-host-app-taskID-service-containous": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - - WhiteList: &types.WhiteList{ - SourceRange: []string{"10.10.10.10"}, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "bar": { - Status: []string{ - "500", - "600", - }, - Backend: "backend-foobar", - Query: "bar_query", - }, - "foo": { - Status: []string{ - "404", - }, - Backend: "backend-foobar", - Query: "foo_query", - }, - }, - RateLimit: &types.RateLimit{ - RateSet: map[string]*types.Rate{ - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - }, - ExtractorFunc: "client.ip", - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Permanent: true, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-app-service-containous": { - Servers: map[string]types.Server{ - "server-app-taskID-service-containous": { - URL: "https://127.0.0.1:80", - Weight: 12, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", 
- }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Bar": "foo", - "Foo": "bar", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actualConfig := p.buildConfiguration(test.tasks) - - require.NotNil(t, actualConfig) - assert.Equal(t, test.expectedBackends, actualConfig.Backends) - assert.Equal(t, test.expectedFrontends, actualConfig.Frontends) - }) - } -} diff --git a/old/provider/mesos/config_test.go b/old/provider/mesos/config_test.go deleted file mode 100644 index e31f1e259..000000000 --- a/old/provider/mesos/config_test.go +++ /dev/null @@ -1,1288 +0,0 @@ -// +build ignore - -package mesos - -import ( - "testing" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/mesosphere/mesos-dns/records/state" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestBuildConfiguration(t *testing.T) { - p := &Provider{ - Domain: "mesos.localhost", - ExposedByDefault: true, - IPSources: "host", - } - - testCases := []struct { - desc string - tasks []state.Task - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "when no tasks", - tasks: []state.Task{}, - expectedFrontends: map[string]*types.Frontend{}, - expectedBackends: map[string]*types.Backend{}, - }, - { - desc: "2 applications with 2 tasks", - tasks: []state.Task{ - // App 1 - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - aTask("ID2", - withIP("10.10.10.11"), - withInfo("name1", - withPorts(withPort("TCP", 81, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - // App 2 - aTask("ID3", - withIP("20.10.10.10"), - withInfo("name2", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - aTask("ID4", - withIP("20.10.10.11"), - withInfo("name2", - withPorts(withPort("TCP", 81, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - Backend: "backend-name1", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:name1.mesos.localhost", - }, - }, - }, - "frontend-ID3": { - Backend: "backend-name2", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID3": { - Rule: "Host:name2.mesos.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-name1": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - "server-ID2": { - URL: "http://10.10.10.11:81", - Weight: label.DefaultWeight, - }, - }, - }, - "backend-name2": { - Servers: map[string]types.Server{ - "server-ID3": { - URL: "http://20.10.10.10:80", - Weight: label.DefaultWeight, - }, - 
"server-ID4": { - URL: "http://20.10.10.11:81", - Weight: label.DefaultWeight, - }, - }, - }, - }, - }, - { - desc: "With basic auth", - tasks: []state.Task{ - // App 1 - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - withLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd"), - withLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true"), - withLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - Backend: "backend-name1", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:name1.mesos.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-name1": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - }, - }, - }, - }, - { - desc: "With basic auth (backward compatibility)", - tasks: []state.Task{ - // App 1 - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - withLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - Backend: "backend-name1", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:name1.mesos.localhost", - }, - }, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-name1": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - }, - }, - }, - }, - { - desc: "With digest auth", - tasks: []state.Task{ - // App 1 - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - withLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true"), - withLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd"), - withLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - Backend: "backend-name1", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:name1.mesos.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - }, - }, - expectedBackends: 
map[string]*types.Backend{ - "backend-name1": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - }, - }, - }, - }, - { - desc: "With Forward auth", - tasks: []state.Task{ - // App 1 - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - withLabel(label.TraefikFrontendAuthForwardAddress, "auth.server"), - withLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true"), - withLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt"), - withLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true"), - withLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt"), - withLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key"), - withLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true"), - withLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User"), - withLabel(label.TraefikFrontendAuthForwardAuthResponseHeaders, "X-Auth-User,X-Auth-Token"), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - Backend: "backend-name1", - EntryPoints: []string{}, - PassHostHeader: true, - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:name1.mesos.localhost", - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-name1": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - }, - }, - }, - }, - { - desc: "with all labels", - tasks: []state.Task{ - aTask("ID1", - withLabel(label.TraefikPort, "666"), - withLabel(label.TraefikProtocol, "https"), - withLabel(label.TraefikWeight, "12"), - - withLabel(label.TraefikBackend, "foobar"), - - withLabel(label.TraefikBackendCircuitBreakerExpression, "NetworkErrorRatio() > 0.5"), - withLabel(label.TraefikBackendResponseForwardingFlushInterval, "10ms"), - withLabel(label.TraefikBackendHealthCheckScheme, "http"), - withLabel(label.TraefikBackendHealthCheckPath, "/health"), - withLabel(label.TraefikBackendHealthCheckPort, "880"), - withLabel(label.TraefikBackendHealthCheckInterval, "6"), - withLabel(label.TraefikBackendHealthCheckTimeout, "3"), - withLabel(label.TraefikBackendHealthCheckHostname, "foo.com"), - withLabel(label.TraefikBackendHealthCheckHeaders, "Foo:bar || Bar:foo"), - - withLabel(label.TraefikBackendLoadBalancerMethod, "drr"), - withLabel(label.TraefikBackendLoadBalancerStickiness, "true"), - withLabel(label.TraefikBackendLoadBalancerStickinessCookieName, "chocolate"), - withLabel(label.TraefikBackendMaxConnAmount, "666"), - withLabel(label.TraefikBackendMaxConnExtractorFunc, "client.ip"), - withLabel(label.TraefikBackendBufferingMaxResponseBodyBytes, "10485760"), - withLabel(label.TraefikBackendBufferingMemResponseBodyBytes, "2097152"), - withLabel(label.TraefikBackendBufferingMaxRequestBodyBytes, "10485760"), - withLabel(label.TraefikBackendBufferingMemRequestBodyBytes, "2097152"), - withLabel(label.TraefikBackendBufferingRetryExpression, "IsNetworkError() && Attempts() <= 2"), - - withLabel(label.TraefikFrontendPassTLSClientCertPem, "true"), - 
withLabel(label.TraefikFrontendPassTLSClientCertInfosNotBefore, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosNotAfter, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSans, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerCountry, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerLocality, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerProvince, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectCountry, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectLocality, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectProvince, "true"), - withLabel(label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber, "true"), - - withLabel(label.TraefikFrontendAuthBasic, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withLabel(label.TraefikFrontendAuthBasicRemoveHeader, "true"), - withLabel(label.TraefikFrontendAuthBasicUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withLabel(label.TraefikFrontendAuthBasicUsersFile, ".htpasswd"), - withLabel(label.TraefikFrontendAuthDigestRemoveHeader, "true"), - withLabel(label.TraefikFrontendAuthDigestUsers, "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"), - withLabel(label.TraefikFrontendAuthDigestUsersFile, ".htpasswd"), - withLabel(label.TraefikFrontendAuthForwardAddress, "auth.server"), - withLabel(label.TraefikFrontendAuthForwardTrustForwardHeader, "true"), - withLabel(label.TraefikFrontendAuthForwardTLSCa, "ca.crt"), - withLabel(label.TraefikFrontendAuthForwardTLSCaOptional, "true"), - withLabel(label.TraefikFrontendAuthForwardTLSCert, "server.crt"), - withLabel(label.TraefikFrontendAuthForwardTLSKey, "server.key"), - withLabel(label.TraefikFrontendAuthForwardTLSInsecureSkipVerify, "true"), - withLabel(label.TraefikFrontendAuthHeaderField, "X-WebAuth-User"), - - withLabel(label.TraefikFrontendEntryPoints, "http,https"), - withLabel(label.TraefikFrontendPassHostHeader, "true"), - withLabel(label.TraefikFrontendPassTLSCert, "true"), - withLabel(label.TraefikFrontendPriority, "666"), - withLabel(label.TraefikFrontendRedirectEntryPoint, "https"), - withLabel(label.TraefikFrontendRedirectRegex, "nope"), - withLabel(label.TraefikFrontendRedirectReplacement, "nope"), - withLabel(label.TraefikFrontendRedirectPermanent, "true"), - withLabel(label.TraefikFrontendRule, "Host:traefik.io"), - withLabel(label.TraefikFrontendWhiteListSourceRange, "10.10.10.10"), - withLabel(label.TraefikFrontendWhiteListIPStrategyExcludedIPS, "10.10.10.10,10.10.10.11"), - withLabel(label.TraefikFrontendWhiteListIPStrategyDepth, "5"), - - withLabel(label.TraefikFrontendRequestHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type:application/json; charset=utf-8"), - 
withLabel(label.TraefikFrontendResponseHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type:application/json; charset=utf-8"), - withLabel(label.TraefikFrontendSSLProxyHeaders, "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type:application/json; charset=utf-8"), - withLabel(label.TraefikFrontendAllowedHosts, "foo,bar,bor"), - withLabel(label.TraefikFrontendHostsProxyHeaders, "foo,bar,bor"), - withLabel(label.TraefikFrontendSSLForceHost, "true"), - withLabel(label.TraefikFrontendSSLHost, "foo"), - withLabel(label.TraefikFrontendCustomFrameOptionsValue, "foo"), - withLabel(label.TraefikFrontendContentSecurityPolicy, "foo"), - withLabel(label.TraefikFrontendPublicKey, "foo"), - withLabel(label.TraefikFrontendReferrerPolicy, "foo"), - withLabel(label.TraefikFrontendCustomBrowserXSSValue, "foo"), - withLabel(label.TraefikFrontendSTSSeconds, "666"), - withLabel(label.TraefikFrontendSSLRedirect, "true"), - withLabel(label.TraefikFrontendSSLTemporaryRedirect, "true"), - withLabel(label.TraefikFrontendSTSIncludeSubdomains, "true"), - withLabel(label.TraefikFrontendSTSPreload, "true"), - withLabel(label.TraefikFrontendForceSTSHeader, "true"), - withLabel(label.TraefikFrontendFrameDeny, "true"), - withLabel(label.TraefikFrontendContentTypeNosniff, "true"), - withLabel(label.TraefikFrontendBrowserXSSFilter, "true"), - withLabel(label.TraefikFrontendIsDevelopment, "true"), - - withLabel(label.Prefix+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageStatus, "404"), - withLabel(label.Prefix+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageBackend, "foobar"), - withLabel(label.Prefix+label.BaseFrontendErrorPage+"foo."+label.SuffixErrorPageQuery, "foo_query"), - withLabel(label.Prefix+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageStatus, "500,600"), - withLabel(label.Prefix+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageBackend, "foobar"), - withLabel(label.Prefix+label.BaseFrontendErrorPage+"bar."+label.SuffixErrorPageQuery, "bar_query"), - - withLabel(label.TraefikFrontendRateLimitExtractorFunc, "client.ip"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitPeriod, "6"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitAverage, "12"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"foo."+label.SuffixRateLimitBurst, "18"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitPeriod, "3"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitAverage, "6"), - withLabel(label.Prefix+label.BaseFrontendRateLimit+"bar."+label.SuffixRateLimitBurst, "9"), - withIP("10.10.10.10"), - withInfo("name1", withPorts( - withPortTCP(80, "n"), - withPortTCP(666, "n"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-ID1": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-foobar", - Routes: map[string]types.Route{ - "route-host-ID1": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: 
true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{"10.10.10.10"}, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"}, - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "backend-foobar", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "backend-foobar", - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foobar": { - Servers: map[string]types.Server{ - "server-ID1": { - URL: "https://10.10.10.10:666", - Weight: 12, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - ResponseForwarding: &types.ResponseForwarding{ - FlushInterval: "10ms", - }, - LoadBalancer: &types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actualConfig := p.buildConfiguration(test.tasks) - - require.NotNil(t, actualConfig) - assert.Equal(t, test.expectedBackends, actualConfig.Backends) - assert.Equal(t, test.expectedFrontends, 
actualConfig.Frontends) - }) - } -} - -func TestTaskFilter(t *testing.T) { - testCases := []struct { - desc string - mesosTask taskData - exposedByDefault bool - expected bool - }{ - { - desc: "no task", - mesosTask: taskData{}, - exposedByDefault: true, - expected: false, - }, - { - desc: "task not healthy", - mesosTask: aTaskData("test", "", withStatus(withState("TASK_RUNNING"))), - exposedByDefault: true, - expected: false, - }, - { - desc: "exposedByDefault false and traefik.enable false", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "false"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: false, - expected: false, - }, - { - desc: "traefik.enable = true", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: false, - expected: true, - }, - { - desc: "exposedByDefault true and traefik.enable true", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "exposedByDefault true and traefik.enable false", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "false"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "traefik.portIndex and traefik.port both set", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortIndex, "1"), - withLabel(label.TraefikPort, "80"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "valid traefik.portIndex", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortIndex, "1"), - withInfo("test", withPorts( - withPortTCP(80, "WEB"), - withPortTCP(443, "WEB HTTPS"), - )), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "valid traefik.portName", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortName, "https"), - withInfo("test", withPorts( - withPortTCP(80, "http"), - withPortTCP(443, "https"), - )), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "missing traefik.portName", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortName, "foo"), - withInfo("test", withPorts( - withPortTCP(80, "http"), - withPortTCP(443, "https"), - )), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "default to first port index", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withInfo("test", withPorts( - withPortTCP(80, "WEB"), - withPortTCP(443, "WEB HTTPS"), - )), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "traefik.portIndex and discoveryPorts don't correspond", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortIndex, "1"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "traefik.portIndex and discoveryPorts correspond", - mesosTask: aTaskData("test", "", - 
withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPortIndex, "0"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "traefik.port is not an integer", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPort, "TRAEFIK"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "traefik.port is not the same as discovery.port", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPort, "443"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - { - desc: "traefik.port is the same as discovery.port", - mesosTask: aTaskData("test", "", - withDefaultStatus(), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPort, "80"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "healthy nil", - mesosTask: aTaskData("test", "", - withStatus( - withState("TASK_RUNNING"), - ), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPort, "80"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: true, - }, - { - desc: "healthy false", - mesosTask: aTaskData("test", "", - withStatus( - withState("TASK_RUNNING"), - withHealthy(false), - ), - withLabel(label.TraefikEnable, "true"), - withLabel(label.TraefikPort, "80"), - withInfo("test", withPorts(withPortTCP(80, "WEB"))), - ), - exposedByDefault: true, - expected: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := taskFilter(test.mesosTask, test.exposedByDefault) - ok := assert.Equal(t, test.expected, actual) - if !ok { - t.Logf("Statuses : %v", test.mesosTask.Statuses) - t.Logf("Label : %v", test.mesosTask.Labels) - t.Logf("DiscoveryInfo : %v", test.mesosTask.DiscoveryInfo) - t.Fatalf("Expected %v, got %v", test.expected, actual) - } - }) - } -} - -func TestGetServerPort(t *testing.T) { - testCases := []struct { - desc string - task taskData - expected string - }{ - { - desc: "port missing", - task: aTaskData("", ""), - expected: "", - }, - { - desc: "numeric port", - task: aTaskData("", "", withLabel(label.TraefikPort, "80")), - expected: "80", - }, - { - desc: "string port", - task: aTaskData("", "", - withLabel(label.TraefikPort, "foobar"), - withInfo("", withPorts(withPort("TCP", 80, ""))), - ), - expected: "", - }, - { - desc: "negative port", - task: aTaskData("", "", - withLabel(label.TraefikPort, "-1"), - withInfo("", withPorts(withPort("TCP", 80, ""))), - ), - expected: "", - }, - { - desc: "task port available", - task: aTaskData("", "", - withInfo("", withPorts(withPort("TCP", 80, ""))), - ), - expected: "80", - }, - { - desc: "multiple task ports available", - task: aTaskData("", "", - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "80", - }, - { - desc: "numeric port index specified", - task: aTaskData("", "", - withLabel(label.TraefikPortIndex, "1"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "443", - }, - { - desc: "string port name specified", - task: aTaskData("", "", - withLabel(label.TraefikPortName, "https"), - withInfo("", 
withPorts( - withPort("TCP", 80, "http"), - withPort("TCP", 443, "https"), - )), - ), - expected: "443", - }, - { - desc: "string port index specified", - task: aTaskData("", "", - withLabel(label.TraefikPortIndex, "foobar"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - )), - ), - expected: "80", - }, - { - desc: "port and port index specified", - task: aTaskData("", "", - withLabel(label.TraefikPort, "80"), - withLabel(label.TraefikPortIndex, "1"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "80", - }, - { - desc: "multiple task ports with service index available", - task: aTaskData("", "http", - withSegmentLabel(label.TraefikPortIndex, "0", "http"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "80", - }, - { - desc: "multiple task ports with service port available", - task: aTaskData("", "https", - withSegmentLabel(label.TraefikPort, "443", "https"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "443", - }, - { - desc: "multiple task ports with service port name available", - task: aTaskData("", "https", - withSegmentLabel(label.TraefikPortName, "b", "https"), - withInfo("", withPorts( - withPort("TCP", 80, "a"), - withPort("TCP", 443, "b"), - )), - ), - expected: "443", - }, - { - desc: "multiple task ports with segment matching port name", - task: aTaskData("", "b", - withInfo("", withPorts( - withPort("TCP", 80, "a"), - withPort("TCP", 443, "b"), - )), - ), - expected: "443", - }, - { - desc: "multiple task ports with services but default port available", - task: aTaskData("", "http", - withSegmentLabel(label.TraefikWeight, "100", "http"), - withInfo("", withPorts( - withPort("TCP", 80, ""), - withPort("TCP", 443, ""), - )), - ), - expected: "80", - }, - } - - p := &Provider{ - ExposedByDefault: true, - IPSources: "host", - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := p.getServerPort(test.task) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetSubDomain(t *testing.T) { - providerGroups := &Provider{GroupsAsSubDomains: true} - providerNoGroups := &Provider{GroupsAsSubDomains: false} - - testCases := []struct { - path string - expected string - provider *Provider - }{ - {"/test", "test", providerNoGroups}, - {"/test", "test", providerGroups}, - {"/a/b/c/d", "d.c.b.a", providerGroups}, - {"/b/a/d/c", "c.d.a.b", providerGroups}, - {"/d/c/b/a", "a.b.c.d", providerGroups}, - {"/c/d/a/b", "b.a.d.c", providerGroups}, - {"/a/b/c/d", "a-b-c-d", providerNoGroups}, - {"/b/a/d/c", "b-a-d-c", providerNoGroups}, - {"/d/c/b/a", "d-c-b-a", providerNoGroups}, - {"/c/d/a/b", "c-d-a-b", providerNoGroups}, - } - - for _, test := range testCases { - test := test - t.Run(test.path, func(t *testing.T) { - t.Parallel() - - actual := test.provider.getSubDomain(test.path) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetServers(t *testing.T) { - testCases := []struct { - desc string - tasks []taskData - expected map[string]types.Server - }{ - { - desc: "", - tasks: []taskData{ - // App 1 - aTaskData("ID1", "", - withIP("10.10.10.10"), - withInfo("name1", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - aTaskData("ID2", "", - withIP("10.10.10.11"), - withLabel(label.TraefikWeight, "18"), - withInfo("name1", - withPorts(withPort("TCP", 81, "WEB"))), - 
withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - // App 2 - aTaskData("ID3", "", - withLabel(label.TraefikWeight, "12"), - withIP("20.10.10.10"), - withInfo("name2", - withPorts(withPort("TCP", 80, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - aTaskData("ID4", "", - withLabel(label.TraefikWeight, "6"), - withIP("20.10.10.11"), - withInfo("name2", - withPorts(withPort("TCP", 81, "WEB"))), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - }, - expected: map[string]types.Server{ - "server-ID1": { - URL: "http://10.10.10.10:80", - Weight: label.DefaultWeight, - }, - "server-ID2": { - URL: "http://10.10.10.11:81", - Weight: 18, - }, - "server-ID3": { - URL: "http://20.10.10.10:80", - Weight: 12, - }, - "server-ID4": { - URL: "http://20.10.10.11:81", - Weight: 6, - }, - }, - }, - { - desc: "with segments matching port names", - tasks: segmentedTaskData([]string{"WEB1", "WEB2", "WEB3"}, - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts( - withPort("TCP", 81, "WEB1"), - withPort("TCP", 82, "WEB2"), - withPort("TCP", 83, "WEB3"), - )), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - ), - expected: map[string]types.Server{ - "server-ID1-service-WEB1": { - URL: "http://10.10.10.10:81", - Weight: label.DefaultWeight, - }, - "server-ID1-service-WEB2": { - URL: "http://10.10.10.10:82", - Weight: label.DefaultWeight, - }, - "server-ID1-service-WEB3": { - URL: "http://10.10.10.10:83", - Weight: label.DefaultWeight, - }, - }, - }, - { - desc: "with segments and portname labels", - tasks: segmentedTaskData([]string{"a", "b", "c"}, - aTask("ID1", - withIP("10.10.10.10"), - withInfo("name1", - withPorts( - withPort("TCP", 81, "WEB1"), - withPort("TCP", 82, "WEB2"), - withPort("TCP", 83, "WEB3"), - )), - withSegmentLabel(label.TraefikPortName, "WEB2", "a"), - withSegmentLabel(label.TraefikPortName, "WEB3", "b"), - withSegmentLabel(label.TraefikPortName, "WEB1", "c"), - withStatus(withHealthy(true), withState("TASK_RUNNING")), - ), - ), - - expected: map[string]types.Server{ - "server-ID1-service-a": { - URL: "http://10.10.10.10:82", - Weight: label.DefaultWeight, - }, - "server-ID1-service-b": { - URL: "http://10.10.10.10:83", - Weight: label.DefaultWeight, - }, - "server-ID1-service-c": { - URL: "http://10.10.10.10:81", - Weight: label.DefaultWeight, - }, - }, - }, - } - - p := &Provider{ - Domain: "docker.localhost", - ExposedByDefault: true, - IPSources: "host", - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := p.getServers(test.tasks) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetBackendName(t *testing.T) { - testCases := []struct { - desc string - mesosTask taskData - expected string - }{ - { - desc: "label missing", - mesosTask: aTaskData("group-app-taskID", "", - withInfo("/group/app"), - ), - expected: "group-app", - }, - { - desc: "label existing", - mesosTask: aTaskData("", "", - withInfo(""), - withLabel(label.TraefikBackend, "bar"), - ), - expected: "bar", - }, - { - desc: "segment label existing", - mesosTask: aTaskData("", "app", - withInfo(""), - withSegmentLabel(label.TraefikBackend, "bar", "app"), - ), - expected: "bar", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := getBackendName(test.mesosTask) - - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetFrontendRule(t *testing.T) { - p := Provider{ 
- Domain: "mesos.localhost", - } - - testCases := []struct { - desc string - mesosTask taskData - expected string - }{ - { - desc: "label missing", - mesosTask: aTaskData("test", "", - withInfo("foo"), - ), - expected: "Host:foo.mesos.localhost", - }, - { - desc: "label domain", - mesosTask: aTaskData("test", "", - withInfo("foo"), - withLabel(label.TraefikDomain, "traefik.localhost"), - ), - expected: "Host:foo.traefik.localhost", - }, - { - desc: "with segment", - mesosTask: aTaskData("test", "bar", - withInfo("foo"), - withLabel(label.TraefikDomain, "traefik.localhost"), - ), - expected: "Host:bar.foo.traefik.localhost", - }, - { - desc: "frontend rule available", - mesosTask: aTaskData("test", "", - withInfo("foo"), - withLabel(label.TraefikFrontendRule, "Host:foo.bar"), - ), - expected: "Host:foo.bar", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - rule := p.getFrontendRule(test.mesosTask) - - assert.Equal(t, test.expected, rule) - }) - } -} diff --git a/old/provider/mesos/mesos.go b/old/provider/mesos/mesos.go deleted file mode 100644 index c29d83da7..000000000 --- a/old/provider/mesos/mesos.go +++ /dev/null @@ -1,173 +0,0 @@ -package mesos - -import ( - "fmt" - "strings" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" - "github.com/mesos/mesos-go/detector" - "github.com/mesosphere/mesos-dns/records" - "github.com/mesosphere/mesos-dns/records/state" - - // Register mesos zoo the detector - _ "github.com/mesos/mesos-go/detector/zoo" - "github.com/mesosphere/mesos-dns/detect" - "github.com/mesosphere/mesos-dns/logging" - "github.com/mesosphere/mesos-dns/util" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configuration of the provider. -type Provider struct { - provider.BaseProvider - Endpoint string `description:"Mesos server endpoint. You can also specify multiple endpoint for Mesos"` - Domain string `description:"Default domain used"` - ExposedByDefault bool `description:"Expose Mesos apps by default" export:"true"` - GroupsAsSubDomains bool `description:"Convert Mesos groups to subdomains" export:"true"` - ZkDetectionTimeout int `description:"Zookeeper timeout (in seconds)" export:"true"` - RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"` - IPSources string `description:"IPSources (e.g. host, docker, mesos, netinfo)" export:"true"` - StateTimeoutSecond int `description:"HTTP Timeout (in seconds)" export:"true"` - Masters []string -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - return p.BaseProvider.Init(constraints) -} - -// Provide allows the mesos provider to provide configurations to traefik -// using the given configuration channel. 
-func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - operation := func() error { - - // initialize logging - logging.SetupLogs() - - log.Debugf("%s", p.IPSources) - - var zk string - var masters []string - - if strings.HasPrefix(p.Endpoint, "zk://") { - zk = p.Endpoint - } else { - masters = strings.Split(p.Endpoint, ",") - } - - errch := make(chan error) - - changed := detectMasters(zk, masters) - reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - zkTimeout := time.Second * time.Duration(p.ZkDetectionTimeout) - timeout := time.AfterFunc(zkTimeout, func() { - if zkTimeout > 0 { - errch <- fmt.Errorf("master detection timed out after %s", zkTimeout) - } - }) - - defer reload.Stop() - defer util.HandleCrash() - - if !p.Watch { - reload.Stop() - timeout.Stop() - } - - for { - select { - case <-reload.C: - tasks := p.getTasks() - configuration := p.buildConfiguration(tasks) - if configuration != nil { - configurationChan <- types.ConfigMessage{ - ProviderName: "mesos", - Configuration: configuration, - } - } - case masters := <-changed: - if len(masters) == 0 || masters[0] == "" { - // no leader - timeout.Reset(zkTimeout) - } else { - timeout.Stop() - } - log.Debugf("new masters detected: %v", masters) - p.Masters = masters - tasks := p.getTasks() - configuration := p.buildConfiguration(tasks) - if configuration != nil { - configurationChan <- types.ConfigMessage{ - ProviderName: "mesos", - Configuration: configuration, - } - } - case err := <-errch: - log.Errorf("%s", err) - } - } - } - - notify := func(err error, time time.Duration) { - log.Errorf("Mesos connection error %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(safe.OperationWithRecover(operation), job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Cannot connect to Mesos server %+v", err) - } - return nil -} - -func detectMasters(zk string, masters []string) <-chan []string { - changed := make(chan []string, 1) - if zk != "" { - log.Debugf("Starting master detector for ZK %s", zk) - if md, err := detector.New(zk); err != nil { - log.Errorf("Failed to create master detector: %v", err) - } else if err := md.Detect(detect.NewMasters(masters, changed)); err != nil { - log.Errorf("Failed to initialize master detector: %v", err) - } - } else { - changed <- masters - } - return changed -} - -func (p *Provider) getTasks() []state.Task { - rg := records.NewRecordGenerator(time.Duration(p.StateTimeoutSecond) * time.Second) - - st, err := rg.FindMaster(p.Masters...) 
- if err != nil { - log.Errorf("Failed to create a client for Mesos, error: %v", err) - return nil - } - - return taskRecords(st) -} - -func taskRecords(st state.State) []state.Task { - var tasks []state.Task - for _, f := range st.Frameworks { - for _, task := range f.Tasks { - for _, slave := range st.Slaves { - if task.SlaveID == slave.ID { - task.SlaveIP = slave.PID.Host - } - } - - // only do running and discoverable tasks - if task.State == "TASK_RUNNING" { - tasks = append(tasks, task) - } - } - } - - return tasks -} diff --git a/old/provider/mesos/mesos_helper_test.go b/old/provider/mesos/mesos_helper_test.go deleted file mode 100644 index dc1881923..000000000 --- a/old/provider/mesos/mesos_helper_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package mesos - -import ( - "strings" - "testing" - - "github.com/containous/traefik/old/provider/label" - "github.com/mesosphere/mesos-dns/records/state" - "github.com/stretchr/testify/assert" -) - -// test helpers - -func TestBuilder(t *testing.T) { - result := aTask("ID1", - withIP("10.10.10.10"), - withLabel("foo", "bar"), - withLabel("fii", "bar"), - withLabel("fuu", "bar"), - withInfo("name1", - withPorts(withPort("TCP", 80, "p"), - withPortTCP(81, "n"))), - withStatus(withHealthy(true), withState("a"))) - - expected := state.Task{ - FrameworkID: "", - ID: "ID1", - SlaveIP: "10.10.10.10", - Name: "", - SlaveID: "", - State: "", - Statuses: []state.Status{{ - State: "a", - Healthy: Bool(true), - ContainerStatus: state.ContainerStatus{}, - }}, - DiscoveryInfo: state.DiscoveryInfo{ - Name: "name1", - Labels: struct { - Labels []state.Label `json:"labels"` - }{}, - Ports: state.Ports{DiscoveryPorts: []state.DiscoveryPort{ - {Protocol: "TCP", Number: 80, Name: "p"}, - {Protocol: "TCP", Number: 81, Name: "n"}}}}, - Labels: []state.Label{ - {Key: "foo", Value: "bar"}, - {Key: "fii", Value: "bar"}, - {Key: "fuu", Value: "bar"}}} - - assert.Equal(t, expected, result) -} - -func aTaskData(id, segment string, ops ...func(*state.Task)) taskData { - ts := &state.Task{ID: id} - for _, op := range ops { - op(ts) - } - lbls := label.ExtractTraefikLabels(extractLabels(*ts)) - if len(lbls[segment]) > 0 { - return taskData{Task: *ts, TraefikLabels: lbls[segment], SegmentName: segment} - } - return taskData{Task: *ts, TraefikLabels: lbls[""], SegmentName: segment} -} - -func segmentedTaskData(segments []string, ts state.Task) []taskData { - var td []taskData - lbls := label.ExtractTraefikLabels(extractLabels(ts)) - for _, s := range segments { - if l, ok := lbls[s]; !ok { - td = append(td, taskData{Task: ts, TraefikLabels: lbls[""], SegmentName: s}) - } else { - td = append(td, taskData{Task: ts, TraefikLabels: l, SegmentName: s}) - } - } - return td -} - -func aTask(id string, ops ...func(*state.Task)) state.Task { - ts := &state.Task{ID: id} - for _, op := range ops { - op(ts) - } - return *ts -} - -func withIP(ip string) func(*state.Task) { - return func(task *state.Task) { - task.SlaveIP = ip - } -} - -func withInfo(name string, ops ...func(*state.DiscoveryInfo)) func(*state.Task) { - return func(task *state.Task) { - info := &state.DiscoveryInfo{Name: name} - for _, op := range ops { - op(info) - } - task.DiscoveryInfo = *info - } -} - -func withPorts(ops ...func(port *state.DiscoveryPort)) func(*state.DiscoveryInfo) { - return func(info *state.DiscoveryInfo) { - var ports []state.DiscoveryPort - for _, op := range ops { - pt := &state.DiscoveryPort{} - op(pt) - ports = append(ports, *pt) - } - - info.Ports = state.Ports{ - DiscoveryPorts: ports, - } - } -} 
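The test builders deleted above (aTask, withIP, withInfo, withPorts and the helpers that follow) are plain functional options applied in order to a state.Task fixture. A minimal standalone sketch of that pattern, with Task and Option as stand-ins rather than the mesos-dns types:

package main

import "fmt"

// Task is a stand-in for the mesos-dns state.Task built by the real helpers.
type Task struct {
	ID     string
	IP     string
	Labels map[string]string
}

// Option mutates a Task under construction.
type Option func(*Task)

func withIP(ip string) Option {
	return func(t *Task) { t.IP = ip }
}

func withLabel(key, value string) Option {
	return func(t *Task) {
		if t.Labels == nil {
			t.Labels = map[string]string{}
		}
		t.Labels[key] = value
	}
}

// aTask applies each option, in order, to a zero Task with the given ID.
func aTask(id string, opts ...Option) Task {
	t := Task{ID: id}
	for _, opt := range opts {
		opt(&t)
	}
	return t
}

func main() {
	task := aTask("ID1", withIP("10.10.10.10"), withLabel("traefik.weight", "12"))
	fmt.Printf("%+v\n", task) // {ID:ID1 IP:10.10.10.10 Labels:map[traefik.weight:12]}
}

The real helpers compose the same way, which is why each test case can describe a full mesos task in a few readable lines.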
- -func withPort(proto string, port int, name string) func(port *state.DiscoveryPort) { - return func(p *state.DiscoveryPort) { - p.Protocol = proto - p.Number = port - p.Name = name - } -} - -func withPortTCP(port int, name string) func(port *state.DiscoveryPort) { - return withPort("TCP", port, name) -} - -func withStatus(ops ...func(*state.Status)) func(*state.Task) { - return func(task *state.Task) { - st := &state.Status{} - for _, op := range ops { - op(st) - } - task.Statuses = append(task.Statuses, *st) - } -} -func withDefaultStatus(ops ...func(*state.Status)) func(*state.Task) { - return func(task *state.Task) { - for _, op := range ops { - st := &state.Status{ - State: "TASK_RUNNING", - Healthy: Bool(true), - } - op(st) - task.Statuses = append(task.Statuses, *st) - } - } -} - -func withHealthy(st bool) func(*state.Status) { - return func(status *state.Status) { - status.Healthy = Bool(st) - } -} - -func withState(st string) func(*state.Status) { - return func(status *state.Status) { - status.State = st - } -} - -func withLabel(key, value string) func(*state.Task) { - return func(task *state.Task) { - lbl := state.Label{Key: key, Value: value} - task.Labels = append(task.Labels, lbl) - } -} - -func withSegmentLabel(key, value, segmentName string) func(*state.Task) { - if len(segmentName) == 0 { - panic("segmentName can not be empty") - } - - property := strings.TrimPrefix(key, label.Prefix) - return func(task *state.Task) { - lbl := state.Label{Key: label.Prefix + segmentName + "." + property, Value: value} - task.Labels = append(task.Labels, lbl) - } -} - -func Bool(v bool) *bool { - return &v -} diff --git a/old/provider/mesos/mesos_test.go b/old/provider/mesos/mesos_test.go deleted file mode 100644 index 1459c27e3..000000000 --- a/old/provider/mesos/mesos_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package mesos - -import ( - "testing" - - "github.com/mesos/mesos-go/upid" - "github.com/mesosphere/mesos-dns/records/state" -) - -func TestTaskRecords(t *testing.T) { - var task = state.Task{ - SlaveID: "s_id", - State: "TASK_RUNNING", - } - var framework = state.Framework{ - Tasks: []state.Task{task}, - } - var slave = state.Slave{ - ID: "s_id", - Hostname: "127.0.0.1", - } - slave.PID.UPID = &upid.UPID{} - slave.PID.Host = slave.Hostname - - var taskState = state.State{ - Slaves: []state.Slave{slave}, - Frameworks: []state.Framework{framework}, - } - - var p = taskRecords(taskState) - if len(p) == 0 { - t.Fatal("No task") - } - if p[0].SlaveIP != slave.Hostname { - t.Fatalf("The SlaveIP (%s) should be set with the slave hostname (%s)", p[0].SlaveID, slave.Hostname) - } -} diff --git a/old/provider/provider.go b/old/provider/provider.go deleted file mode 100644 index fe73c751d..000000000 --- a/old/provider/provider.go +++ /dev/null @@ -1,132 +0,0 @@ -package provider - -import ( - "bytes" - "strings" - "text/template" - "unicode" - - "github.com/BurntSushi/toml" - "github.com/Masterminds/sprig" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -// Provider defines methods of a provider. -type Provider interface { - // Provide allows the provider to provide configurations to traefik - // using the given configuration channel. 
- Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error - Init(constraints types.Constraints) error -} - -// BaseProvider should be inherited by providers -type BaseProvider struct { - Watch bool `description:"Watch provider" export:"true"` - Filename string `description:"Override default configuration template. For advanced users :)" export:"true"` - Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"` - Trace bool `description:"Display additional provider logs (if available)." export:"true"` - DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"` -} - -// Init for compatibility reason the BaseProvider implements an empty Init -func (p *BaseProvider) Init(constraints types.Constraints) error { - p.Constraints = append(p.Constraints, constraints...) - return nil -} - -// MatchConstraints must match with EVERY single constraint -// returns first constraint that do not match or nil -func (p *BaseProvider) MatchConstraints(tags []string) (bool, *types.Constraint) { - // if there is no tags and no constraints, filtering is disabled - if len(tags) == 0 && len(p.Constraints) == 0 { - return true, nil - } - - for _, constraint := range p.Constraints { - // xor: if ok and constraint.MustMatch are equal, then no tag is currently matching with the constraint - if ok := constraint.MatchConstraintWithAtLeastOneTag(tags); ok != constraint.MustMatch { - return false, constraint - } - } - - // If no constraint or every constraints matching - return true, nil -} - -// GetConfiguration return the provider configuration from default template (file or content) or overrode template file -func (p *BaseProvider) GetConfiguration(defaultTemplate string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { - tmplContent, err := p.getTemplateContent(defaultTemplate) - if err != nil { - return nil, err - } - return p.CreateConfiguration(tmplContent, funcMap, templateObjects) -} - -// CreateConfiguration create a provider configuration from content using templating -func (p *BaseProvider) CreateConfiguration(tmplContent string, funcMap template.FuncMap, templateObjects interface{}) (*types.Configuration, error) { - var defaultFuncMap = sprig.TxtFuncMap() - // tolower is deprecated in favor of sprig's lower function - defaultFuncMap["tolower"] = strings.ToLower - defaultFuncMap["normalize"] = Normalize - defaultFuncMap["split"] = split - for funcID, funcElement := range funcMap { - defaultFuncMap[funcID] = funcElement - } - - tmpl := template.New(p.Filename).Funcs(defaultFuncMap) - - _, err := tmpl.Parse(tmplContent) - if err != nil { - return nil, err - } - - var buffer bytes.Buffer - err = tmpl.Execute(&buffer, templateObjects) - if err != nil { - return nil, err - } - - var renderedTemplate = buffer.String() - if p.DebugLogGeneratedTemplate { - log.Debugf("Template content: %s", tmplContent) - log.Debugf("Rendering results: %s", renderedTemplate) - } - return p.DecodeConfiguration(renderedTemplate) -} - -// DecodeConfiguration Decode a *types.Configuration from a content -func (p *BaseProvider) DecodeConfiguration(content string) (*types.Configuration, error) { - configuration := new(types.Configuration) - if _, err := toml.Decode(content, configuration); err != nil { - return nil, err - } - return configuration, nil -} - -// genTemplate does not do anything anymore because we removed the templates -func (p *BaseProvider) 
getTemplateContent(defaultTemplateFile string) (string, error) { - return "", nil -} - -func split(sep, s string) []string { - return strings.Split(s, sep) -} - -// Normalize transform a string that work with the rest of traefik -// Replace '.' with '-' in quoted keys because of this issue https://github.com/BurntSushi/toml/issues/78 -func Normalize(name string) string { - fargs := func(c rune) bool { - return !unicode.IsLetter(c) && !unicode.IsNumber(c) - } - // get function - return strings.Join(strings.FieldsFunc(name, fargs), "-") -} - -// ReverseStringSlice invert the order of the given slice of string -func ReverseStringSlice(slice *[]string) { - for i, j := 0, len(*slice)-1; i < j; i, j = i+1, j-1 { - (*slice)[i], (*slice)[j] = (*slice)[j], (*slice)[i] - } -} diff --git a/old/provider/provider_test.go b/old/provider/provider_test.go deleted file mode 100644 index 3744da038..000000000 --- a/old/provider/provider_test.go +++ /dev/null @@ -1,487 +0,0 @@ -package provider - -import ( - "io/ioutil" - "os" - "strings" - "testing" - "text/template" - - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type myProvider struct { - BaseProvider - TLS *types.ClientTLS -} - -func (p *myProvider) Foo() string { - return "bar" -} - -func TestConfigurationErrors(t *testing.T) { - t.Skip("deprecated") - templateErrorFile, err := ioutil.TempFile("", "provider-configuration-error") - require.NoError(t, err) - - defer os.RemoveAll(templateErrorFile.Name()) - - data := []byte("Not a valid template {{ Bar }}") - - err = ioutil.WriteFile(templateErrorFile.Name(), data, 0700) - require.NoError(t, err) - - templateInvalidTOMLFile, err := ioutil.TempFile("", "provider-configuration-error") - require.NoError(t, err) - - defer os.RemoveAll(templateInvalidTOMLFile.Name()) - - data = []byte(`Hello {{ .Name }} -{{ Foo }}`) - - err = ioutil.WriteFile(templateInvalidTOMLFile.Name(), data, 0700) - require.NoError(t, err) - - invalids := []struct { - provider *myProvider - defaultTemplate string - expectedError string - funcMap template.FuncMap - templateObjects interface{} - }{ - { - provider: &myProvider{ - BaseProvider: BaseProvider{ - Filename: "/non/existent/template.tmpl", - }, - }, - expectedError: "open /non/existent/template.tmpl: no such file or directory", - }, - { - provider: &myProvider{}, - defaultTemplate: "non/existent/template.tmpl", - expectedError: "Asset non/existent/template.tmpl not found", - }, - { - provider: &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateErrorFile.Name(), - }, - }, - expectedError: `function "Bar" not defined`, - }, - { - provider: &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateInvalidTOMLFile.Name(), - }, - }, - expectedError: "Near line 1 (last key parsed 'Hello'): expected key separator '=', but got '<' instead", - funcMap: template.FuncMap{ - "Foo": func() string { - return "bar" - }, - }, - templateObjects: struct{ Name string }{Name: "bar"}, - }, - } - - for _, invalid := range invalids { - configuration, err := invalid.provider.GetConfiguration(invalid.defaultTemplate, invalid.funcMap, nil) - if err == nil || !strings.Contains(err.Error(), invalid.expectedError) { - t.Fatalf("should have generate an error with %q, got %v", invalid.expectedError, err) - } - - assert.Nil(t, configuration) - } -} - -func TestGetConfiguration(t *testing.T) { - t.Skip("deprecated") - templateFile, err := ioutil.TempFile("", "provider-configuration") - require.NoError(t, err) - - 
defer os.RemoveAll(templateFile.Name()) - - data := []byte(`[backends] - [backends.backend1] - [backends.backend1.circuitbreaker] - expression = "NetworkErrorRatio() > 0.5" - [backends.backend1.servers.server1] - url = "http://172.17.0.2:80" - weight = 10 - [backends.backend1.servers.server2] - url = "http://172.17.0.3:80" - weight = 1 - -[frontends] - [frontends.frontend1] - backend = "backend1" - passHostHeader = true - [frontends.frontend11.routes.test_2] - rule = "Path" - value = "/test"`) - - err = ioutil.WriteFile(templateFile.Name(), data, 0700) - require.NoError(t, err) - - provider := &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateFile.Name(), - }, - } - - configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil) - require.NoError(t, err) - - assert.NotNil(t, configuration) -} - -func TestGetConfigurationReturnsCorrectMaxConnConfiguration(t *testing.T) { - t.Skip("deprecated") - templateFile, err := ioutil.TempFile("", "provider-configuration") - require.NoError(t, err) - - defer os.RemoveAll(templateFile.Name()) - - data := []byte(`[backends] - [backends.backend1] - [backends.backend1.maxconn] - amount = 10 - extractorFunc = "request.host"`) - - err = ioutil.WriteFile(templateFile.Name(), data, 0700) - require.NoError(t, err) - - provider := &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateFile.Name(), - }, - } - - configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil) - require.NoError(t, err) - - require.NotNil(t, configuration) - require.Contains(t, configuration.Backends, "backend1") - assert.EqualValues(t, 10, configuration.Backends["backend1"].MaxConn.Amount) - assert.Equal(t, "request.host", configuration.Backends["backend1"].MaxConn.ExtractorFunc) -} - -func TestNilClientTLS(t *testing.T) { - t.Skip("deprecated") - p := &myProvider{ - BaseProvider: BaseProvider{ - Filename: "", - }, - } - - _, err := p.TLS.CreateTLSConfig() - require.NoError(t, err, "CreateTLSConfig should assume that consumer does not want a TLS configuration if input is nil") -} - -func TestInsecureSkipVerifyClientTLS(t *testing.T) { - t.Skip("deprecated") - p := &myProvider{ - BaseProvider: BaseProvider{ - Filename: "", - }, - TLS: &types.ClientTLS{ - InsecureSkipVerify: true, - }, - } - - config, err := p.TLS.CreateTLSConfig() - require.NoError(t, err, "CreateTLSConfig should assume that consumer does not want a TLS configuration if input is nil") - - assert.True(t, config.InsecureSkipVerify, "CreateTLSConfig should support setting only InsecureSkipVerify property") -} - -func TestInsecureSkipVerifyFalseClientTLS(t *testing.T) { - t.Skip("deprecated") - p := &myProvider{ - BaseProvider: BaseProvider{ - Filename: "", - }, - TLS: &types.ClientTLS{ - InsecureSkipVerify: false, - }, - } - - _, err := p.TLS.CreateTLSConfig() - assert.Errorf(t, err, "CreateTLSConfig should error if consumer does not set a TLS cert or key configuration and not chooses InsecureSkipVerify to be true") -} - -func TestMatchingConstraints(t *testing.T) { - t.Skip("deprecated") - testCases := []struct { - desc string - constraints types.Constraints - tags []string - expected bool - }{ - // simple test: must match - { - desc: "tag==us-east-1 with us-east-1", - constraints: types.Constraints{ - { - Key: "tag", - MustMatch: true, - Regex: "us-east-1", - }, - }, - tags: []string{ - "us-east-1", - }, - expected: true, - }, - // simple test: must match but does not match - { - desc: "tag==us-east-1 with us-east-2", - constraints: types.Constraints{ - { - Key: 
"tag", - MustMatch: true, - Regex: "us-east-1", - }, - }, - tags: []string{ - "us-east-2", - }, - expected: false, - }, - // simple test: must not match - { - desc: "tag!=us-east-1 with us-east-1", - constraints: types.Constraints{ - { - Key: "tag", - MustMatch: false, - Regex: "us-east-1", - }, - }, - tags: []string{ - "us-east-1", - }, - expected: false, - }, - // complex test: globbing - { - desc: "tag!=us-east-* with us-east-1", - constraints: types.Constraints{ - { - Key: "tag", - MustMatch: true, - Regex: "us-east-*", - }, - }, - tags: []string{ - "us-east-1", - }, - expected: true, - }, - // complex test: multiple constraints - { - desc: "tag==us-east-* & tag!=api with us-east-1 & api", - constraints: types.Constraints{ - { - Key: "tag", - MustMatch: true, - Regex: "us-east-*", - }, - { - Key: "tag", - MustMatch: false, - Regex: "api", - }, - }, - tags: []string{ - "api", - "us-east-1", - }, - expected: false, - }, - } - - for _, test := range testCases { - p := myProvider{ - BaseProvider: BaseProvider{ - Constraints: test.constraints, - }, - } - - actual, _ := p.MatchConstraints(test.tags) - assert.Equal(t, test.expected, actual) - } -} - -func TestDefaultFuncMap(t *testing.T) { - t.Skip("deprecated") - templateFile, err := ioutil.TempFile("", "provider-configuration") - require.NoError(t, err) - defer os.RemoveAll(templateFile.Name()) - - data := []byte(` - [backends] - [backends.{{ "backend-1" | replace "-" "" }}] - [backends.{{ "BACKEND1" | tolower }}.circuitbreaker] - expression = "NetworkErrorRatio() > 0.5" - [backends.servers.server1] - url = "http://172.17.0.2:80" - weight = 10 - [backends.backend1.servers.server2] - url = "http://172.17.0.3:80" - weight = 1 - -[frontends] - [frontends.{{normalize "frontend/1"}}] - {{ $backend := "backend1/test/value" | split "/" }} - {{ $backendid := index $backend 1 }} - {{ if "backend1" | contains "backend" }} - backend = "backend1" - {{end}} - passHostHeader = true - [frontends.frontend-1.routes.test_2] - rule = "Path" - value = "/test"`) - - err = ioutil.WriteFile(templateFile.Name(), data, 0700) - require.NoError(t, err) - - provider := &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateFile.Name(), - }, - } - - configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil) - require.NoError(t, err) - - require.NotNil(t, configuration) - assert.Contains(t, configuration.Backends, "backend1") - assert.Contains(t, configuration.Frontends, "frontend-1") -} - -func TestSprigFunctions(t *testing.T) { - t.Skip("deprecated") - templateFile, err := ioutil.TempFile("", "provider-configuration") - require.NoError(t, err) - - defer os.RemoveAll(templateFile.Name()) - - data := []byte(` - {{$backend_name := trimAll "-" uuidv4}} - [backends] - [backends.{{$backend_name}}] - [backends.{{$backend_name}}.circuitbreaker] - [backends.{{$backend_name}}.servers.server2] - url = "http://172.17.0.3:80" - weight = 1 - -[frontends] - [frontends.{{normalize "frontend/1"}}] - backend = "{{$backend_name}}" - passHostHeader = true - [frontends.frontend-1.routes.test_2] - rule = "Path" - value = "/test"`) - - err = ioutil.WriteFile(templateFile.Name(), data, 0700) - require.NoError(t, err) - - provider := &myProvider{ - BaseProvider: BaseProvider{ - Filename: templateFile.Name(), - }, - } - - configuration, err := provider.GetConfiguration(templateFile.Name(), nil, nil) - require.NoError(t, err) - - require.NotNil(t, configuration) - assert.Len(t, configuration.Backends, 1) - assert.Contains(t, configuration.Frontends, "frontend-1") -} 
- -func TestBaseProvider_GetConfiguration(t *testing.T) { - t.Skip("deprecated") - baseProvider := BaseProvider{} - - testCases := []struct { - name string - defaultTemplateFile string - expectedContent string - }{ - { - defaultTemplateFile: "templates/docker.tmpl", - expectedContent: readTemplateFile(t, "./../templates/docker.tmpl"), - }, - { - defaultTemplateFile: `template content`, - expectedContent: `template content`, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.name, func(t *testing.T) { - - content, err := baseProvider.getTemplateContent(test.defaultTemplateFile) - require.NoError(t, err) - - assert.Equal(t, test.expectedContent, content) - }) - } -} - -func TestNormalize(t *testing.T) { - t.Skip("deprecated") - testCases := []struct { - desc string - name string - expected string - }{ - { - desc: "without special chars", - name: "foobar", - expected: "foobar", - }, - { - desc: "with special chars", - name: "foo.foo.foo;foo:foo!foo/foo\\foo)foo_123-ç_àéè", - expected: "foo-foo-foo-foo-foo-foo-foo-foo-foo-123-ç-àéè", - }, - { - desc: "starts with special chars", - name: ".foo.foo", - expected: "foo-foo", - }, - { - desc: "ends with special chars", - name: "foo.foo.", - expected: "foo-foo", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := Normalize(test.name) - assert.Equal(t, test.expected, actual) - }) - } -} - -func readTemplateFile(t *testing.T, path string) string { - t.Skip("deprecated") - t.Helper() - expectedContent, err := ioutil.ReadFile(path) - if err != nil { - t.Fatal(err) - } - return string(expectedContent) -} diff --git a/old/provider/rancher/api.go b/old/provider/rancher/api.go deleted file mode 100644 index 1217ff608..000000000 --- a/old/provider/rancher/api.go +++ /dev/null @@ -1,273 +0,0 @@ -package rancher - -import ( - "context" - "os" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" - "github.com/mitchellh/mapstructure" - rancher "github.com/rancher/go-rancher/v2" -) - -const ( - labelRancherStackServiceName = "io.rancher.stack_service.name" - hostNetwork = "host" -) - -var withoutPagination *rancher.ListOpts - -// APIConfiguration contains configuration properties specific to the Rancher -// API provider. 
-type APIConfiguration struct { - Endpoint string `description:"Rancher server API HTTP(S) endpoint"` - AccessKey string `description:"Rancher server API access key"` - SecretKey string `description:"Rancher server API secret key"` -} - -func init() { - withoutPagination = &rancher.ListOpts{ - Filters: map[string]interface{}{"limit": 0}, - } -} - -func (p *Provider) createClient() (*rancher.RancherClient, error) { - rancherURL := getenv("CATTLE_URL", p.API.Endpoint) - accessKey := getenv("CATTLE_ACCESS_KEY", p.API.AccessKey) - secretKey := getenv("CATTLE_SECRET_KEY", p.API.SecretKey) - - return rancher.NewRancherClient(&rancher.ClientOpts{ - Url: rancherURL, - AccessKey: accessKey, - SecretKey: secretKey, - }) -} - -func getenv(key, fallback string) string { - value := os.Getenv(key) - if len(value) == 0 { - return fallback - } - return value -} - -func (p *Provider) apiProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - - if p.API == nil { - p.API = &APIConfiguration{} - } - - safe.Go(func() { - operation := func() error { - rancherClient, err := p.createClient() - - if err != nil { - log.Errorf("Failed to create a client for rancher, error: %s", err) - return err - } - - ctx := context.Background() - stacks, err := listRancherStacks(rancherClient) - if err != nil { - return err - } - services, err := listRancherServices(rancherClient) - if err != nil { - return err - } - container, err := listRancherContainer(rancherClient) - if err != nil { - return err - } - - var rancherData = parseAPISourcedRancherData(stacks, services, container) - - configuration := p.buildConfiguration(rancherData) - configurationChan <- types.ConfigMessage{ - ProviderName: "rancher", - Configuration: configuration, - } - - if p.Watch { - _, cancel := context.WithCancel(ctx) - ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - pool.Go(func(stop chan bool) { - for { - select { - case <-ticker.C: - checkAPI, errAPI := rancherClient.ApiKey.List(withoutPagination) - - if errAPI != nil { - log.Errorf("Cannot establish connection: %+v, Rancher API return: %+v; Skipping refresh Data from Rancher API.", errAPI, checkAPI) - continue - } - log.Debugf("Refreshing new Data from Rancher API") - stacks, err = listRancherStacks(rancherClient) - if err != nil { - continue - } - services, err = listRancherServices(rancherClient) - if err != nil { - continue - } - container, err = listRancherContainer(rancherClient) - if err != nil { - continue - } - - rancherData := parseAPISourcedRancherData(stacks, services, container) - - configuration := p.buildConfiguration(rancherData) - if configuration != nil { - configurationChan <- types.ConfigMessage{ - ProviderName: "rancher", - Configuration: configuration, - } - } - case <-stop: - ticker.Stop() - cancel() - return - } - } - }) - } - - return nil - } - notify := func(err error, time time.Duration) { - log.Errorf("Provider connection error %+v, retrying in %s", err, time) - } - err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify) - if err != nil { - log.Errorf("Cannot connect to Provider Endpoint %+v", err) - } - }) - - return nil -} - -func listRancherStacks(client *rancher.RancherClient) ([]*rancher.Stack, error) { - - var stackList []*rancher.Stack - - stacks, err := client.Stack.List(withoutPagination) - - if err != nil { - log.Errorf("Cannot get Provider Stacks %+v", err) - } - - for k := range stacks.Data { - stackList = append(stackList, &stacks.Data[k]) - } - - return stackList, err -} 
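apiProvide above, like the mesos Provide earlier in this patch, wraps its polling setup in backoff.RetryNotify with a log-and-retry callback. A minimal standalone sketch of that wiring, using only github.com/cenkalti/backoff as imported by the deleted code; the traefik-specific job.NewBackOff and safe.OperationWithRecover wrappers are omitted, and the failing operation is a placeholder for the Rancher client calls:

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0

	// operation stands in for the provider's connection/polling setup; it is
	// retried until it returns nil or the back-off gives up.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("endpoint not reachable yet")
		}
		log.Printf("connected after %d attempts", attempts)
		return nil
	}

	// notify is invoked between attempts with the error and the next delay,
	// mirroring the "retrying in ..." log lines in the deleted providers.
	notify := func(err error, next time.Duration) {
		log.Printf("provider connection error %v, retrying in %s", err, next)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Printf("cannot connect to endpoint: %v", err)
	}
}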
- -func listRancherServices(client *rancher.RancherClient) ([]*rancher.Service, error) { - - var servicesList []*rancher.Service - - services, err := client.Service.List(withoutPagination) - - if err != nil { - log.Errorf("Cannot get Provider Services %+v", err) - } - - for k := range services.Data { - servicesList = append(servicesList, &services.Data[k]) - } - - return servicesList, err -} - -func listRancherContainer(client *rancher.RancherClient) ([]*rancher.Container, error) { - - var containerList []*rancher.Container - - container, err := client.Container.List(withoutPagination) - - if err != nil { - log.Errorf("Cannot get Provider Services %+v", err) - return containerList, err - } - - valid := true - - for valid { - for k := range container.Data { - containerList = append(containerList, &container.Data[k]) - } - - container, err = container.Next() - - if err != nil { - break - } - - if container == nil || len(container.Data) == 0 { - valid = false - } - } - - return containerList, err -} - -func parseAPISourcedRancherData(stacks []*rancher.Stack, services []*rancher.Service, containers []*rancher.Container) []rancherData { - var rancherDataList []rancherData - - for _, stack := range stacks { - - for _, service := range services { - - if service.StackId != stack.Id { - continue - } - - rData := rancherData{ - Name: service.Name + "/" + stack.Name, - Health: service.HealthState, - State: service.State, - Labels: make(map[string]string), - Containers: []string{}, - } - - if service.LaunchConfig == nil || service.LaunchConfig.Labels == nil { - log.Warnf("Rancher Service Labels are missing. Stack: %s, service: %s", stack.Name, service.Name) - } else { - for key, value := range service.LaunchConfig.Labels { - rData.Labels[key] = value.(string) - } - } - - for _, container := range containers { - if container.Labels[labelRancherStackServiceName] == stack.Name+"/"+service.Name && - containerFilter(container.Name, container.HealthState, container.State) { - - if container.NetworkMode == hostNetwork { - var endpoints []*rancher.PublicEndpoint - err := mapstructure.Decode(service.PublicEndpoints, &endpoints) - - if err != nil { - log.Errorf("Failed to decode PublicEndpoint: %v", err) - continue - } - - if len(endpoints) > 0 { - rData.Containers = append(rData.Containers, endpoints[0].IpAddress) - } - } else { - rData.Containers = append(rData.Containers, container.PrimaryIpAddress) - } - } - } - rancherDataList = append(rancherDataList, rData) - } - } - - return rancherDataList -} diff --git a/old/provider/rancher/config.go b/old/provider/rancher/config.go deleted file mode 100644 index 28f6863c7..000000000 --- a/old/provider/rancher/config.go +++ /dev/null @@ -1,210 +0,0 @@ -package rancher - -import ( - "fmt" - "net" - "strconv" - "strings" - "text/template" - - "github.com/BurntSushi/ty/fun" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" -) - -func (p *Provider) buildConfiguration(services []rancherData) *types.Configuration { - var RancherFuncMap = template.FuncMap{ - "getLabelValue": label.GetStringValue, - "getDomain": label.GetFuncString(label.TraefikDomain, p.Domain), - - // Backend functions - "getCircuitBreaker": label.GetCircuitBreaker, - "getLoadBalancer": label.GetLoadBalancer, - "getMaxConn": label.GetMaxConn, - "getHealthCheck": label.GetHealthCheck, - "getBuffering": label.GetBuffering, - "getResponseForwarding": 
label.GetResponseForwarding, - "getServers": getServers, - - // Frontend functions - "getBackendName": getBackendName, - "getFrontendRule": p.getFrontendRule, - "getPriority": label.GetFuncInt(label.TraefikFrontendPriority, label.DefaultFrontendPriority), - "getPassHostHeader": label.GetFuncBool(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader), - "getPassTLSCert": label.GetFuncBool(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert), - "getPassTLSClientCert": label.GetTLSClientCert, - "getEntryPoints": label.GetFuncSliceString(label.TraefikFrontendEntryPoints), - "getBasicAuth": label.GetFuncSliceString(label.TraefikFrontendAuthBasic), // Deprecated - "getAuth": label.GetAuth, - "getErrorPages": label.GetErrorPages, - "getRateLimit": label.GetRateLimit, - "getRedirect": label.GetRedirect, - "getHeaders": label.GetHeaders, - "getWhiteList": label.GetWhiteList, - } - - // filter services - filteredServices := fun.Filter(p.serviceFilter, services).([]rancherData) - - frontends := map[string]rancherData{} - backends := map[string]rancherData{} - - for _, service := range filteredServices { - segmentProperties := label.ExtractTraefikLabels(service.Labels) - for segmentName, labels := range segmentProperties { - service.SegmentLabels = labels - service.SegmentName = segmentName - - frontendName := p.getFrontendName(service) - frontends[frontendName] = service - backendName := getBackendName(service) - backends[backendName] = service - } - } - - templateObjects := struct { - Frontends map[string]rancherData - Backends map[string]rancherData - Domain string - }{ - Frontends: frontends, - Backends: backends, - Domain: p.Domain, - } - - configuration, err := p.GetConfiguration("templates/rancher.tmpl", RancherFuncMap, templateObjects) - if err != nil { - log.Error(err) - } - - return configuration -} - -func (p *Provider) serviceFilter(service rancherData) bool { - segmentProperties := label.ExtractTraefikLabels(service.Labels) - - for segmentName, labels := range segmentProperties { - _, err := checkSegmentPort(labels, segmentName) - if err != nil { - log.Debugf("Filtering service %s %s without traefik.port label", service.Name, segmentName) - return false - } - - if len(p.getFrontendRule(service.Name, labels)) == 0 { - log.Debugf("Filtering container with empty frontend rule %s %s", service.Name, segmentName) - return false - } - } - - if !label.IsEnabled(service.Labels, p.ExposedByDefault) { - log.Debugf("Filtering disabled service %s", service.Name) - return false - } - - constraintTags := label.GetSliceStringValue(service.Labels, label.TraefikTags) - if ok, failingConstraint := p.MatchConstraints(constraintTags); !ok { - if failingConstraint != nil { - log.Debugf("Filtering service %s with constraint %s", service.Name, failingConstraint.String()) - } - return false - } - - // Only filter services by Health (HealthState) and State if EnableServiceHealthFilter is true - if p.EnableServiceHealthFilter { - - if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy { - log.Debugf("Filtering service %s with healthState of %s", service.Name, service.Health) - return false - } - if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading { - log.Debugf("Filtering service %s with state of %s", service.Name, service.State) - return false - } - } - - return true -} - -func (p *Provider) getFrontendRule(serviceName string, labels map[string]string) string { - domain 
:= label.GetStringValue(labels, label.TraefikDomain, p.Domain) - if len(domain) > 0 { - domain = "." + domain - } - - defaultRule := "Host:" + strings.ToLower(strings.Replace(serviceName, "/", ".", -1)) + domain - - return label.GetStringValue(labels, label.TraefikFrontendRule, defaultRule) -} - -func (p *Provider) getFrontendName(service rancherData) string { - var name string - if len(service.SegmentName) > 0 { - name = getBackendName(service) - } else { - name = p.getFrontendRule(service.Name, service.SegmentLabels) - } - - return provider.Normalize(name) -} - -func getBackendName(service rancherData) string { - if len(service.SegmentName) > 0 { - return getSegmentBackendName(service) - } - - return getDefaultBackendName(service) -} - -func getSegmentBackendName(service rancherData) string { - if value := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 { - return provider.Normalize(service.Name + "-" + value) - } - - return provider.Normalize(service.Name + "-" + getDefaultBackendName(service) + "-" + service.SegmentName) -} - -func getDefaultBackendName(service rancherData) string { - backend := label.GetStringValue(service.SegmentLabels, label.TraefikBackend, service.Name) - return provider.Normalize(backend) -} - -func getServers(service rancherData) map[string]types.Server { - var servers map[string]types.Server - - for index, ip := range service.Containers { - if len(ip) == 0 { - log.Warnf("Unable to find the IP address for a container in the service %q: this container is ignored.", service.Name) - continue - } - - if servers == nil { - servers = make(map[string]types.Server) - } - - protocol := label.GetStringValue(service.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol) - port := label.GetStringValue(service.SegmentLabels, label.TraefikPort, "") - weight := label.GetIntValue(service.SegmentLabels, label.TraefikWeight, label.DefaultWeight) - - serverName := "server-" + strconv.Itoa(index) - servers[serverName] = types.Server{ - URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)), - Weight: weight, - } - } - - return servers -} - -func checkSegmentPort(labels map[string]string, segmentName string) (int, error) { - if rawPort, ok := labels[label.TraefikPort]; ok { - port, err := strconv.Atoi(rawPort) - if err != nil { - return port, fmt.Errorf("invalid port value %q for the segment %q: %v", rawPort, segmentName, err) - } - } else { - return 0, fmt.Errorf("port label is missing, please use %s as default value or define port label for all segments ('traefik..port')", label.TraefikPort) - } - return 0, nil -} diff --git a/old/provider/rancher/config_test.go b/old/provider/rancher/config_test.go deleted file mode 100644 index b87dc6bf3..000000000 --- a/old/provider/rancher/config_test.go +++ /dev/null @@ -1,1209 +0,0 @@ -// +build ignore - -package rancher - -import ( - "testing" - "time" - - "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/label" - "github.com/containous/traefik/old/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestProviderBuildConfiguration(t *testing.T) { - provider := &Provider{ - Domain: "rancher.localhost", - ExposedByDefault: true, - } - - testCases := []struct { - desc string - services []rancherData - expectedFrontends map[string]*types.Frontend - expectedBackends map[string]*types.Backend - }{ - { - desc: "without services", - services: []rancherData{}, - expectedFrontends: map[string]*types.Frontend{}, - 
expectedBackends: map[string]*types.Backend{}, - }, - { - desc: "when all labels are set", - services: []rancherData{ - { - Labels: map[string]string{ - label.TraefikPort: "666", - label.TraefikProtocol: "https", - label.TraefikWeight: "12", - - label.TraefikBackend: "foobar", - - label.TraefikBackendCircuitBreakerExpression: "NetworkErrorRatio() > 0.5", - label.TraefikBackendResponseForwardingFlushInterval: "10ms", - label.TraefikBackendHealthCheckScheme: "http", - label.TraefikBackendHealthCheckPath: "/health", - label.TraefikBackendHealthCheckPort: "880", - label.TraefikBackendHealthCheckInterval: "6", - label.TraefikBackendHealthCheckTimeout: "3", - label.TraefikBackendHealthCheckHostname: "foo.com", - label.TraefikBackendHealthCheckHeaders: "Foo:bar || Bar:foo", - label.TraefikBackendLoadBalancerMethod: "drr", - label.TraefikBackendLoadBalancerStickiness: "true", - label.TraefikBackendLoadBalancerStickinessCookieName: "chocolate", - label.TraefikBackendMaxConnAmount: "666", - label.TraefikBackendMaxConnExtractorFunc: "client.ip", - label.TraefikBackendBufferingMaxResponseBodyBytes: "10485760", - label.TraefikBackendBufferingMemResponseBodyBytes: "2097152", - label.TraefikBackendBufferingMaxRequestBodyBytes: "10485760", - label.TraefikBackendBufferingMemRequestBodyBytes: "2097152", - label.TraefikBackendBufferingRetryExpression: "IsNetworkError() && Attempts() <= 2", - - label.TraefikFrontendPassTLSClientCertPem: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerCommonName: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerCountry: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerDomainComponent: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerLocality: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerOrganization: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerProvince: "true", - label.TraefikFrontendPassTLSClientCertInfosIssuerSerialNumber: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectCommonName: "true", - label.TraefikFrontendPassTLSClientCertInfosNotBefore: "true", - label.TraefikFrontendPassTLSClientCertInfosNotAfter: "true", - label.TraefikFrontendPassTLSClientCertInfosSans: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectCountry: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectDomainComponent: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectLocality: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectOrganization: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectProvince: "true", - label.TraefikFrontendPassTLSClientCertInfosSubjectSerialNumber: "true", - - label.TraefikFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthBasicRemoveHeader: "true", - label.TraefikFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthBasicUsersFile: ".htpasswd", - label.TraefikFrontendAuthDigestRemoveHeader: "true", - label.TraefikFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthDigestUsersFile: ".htpasswd", - label.TraefikFrontendAuthForwardAddress: "auth.server", - label.TraefikFrontendAuthForwardTrustForwardHeader: "true", - label.TraefikFrontendAuthForwardTLSCa: "ca.crt", - label.TraefikFrontendAuthForwardTLSCaOptional: "true", - label.TraefikFrontendAuthForwardTLSCert: "server.crt", - 
label.TraefikFrontendAuthForwardTLSKey: "server.key", - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify: "true", - label.TraefikFrontendAuthHeaderField: "X-WebAuth-User", - - label.TraefikFrontendEntryPoints: "http,https", - label.TraefikFrontendPassHostHeader: "true", - label.TraefikFrontendPassTLSCert: "true", - label.TraefikFrontendPriority: "666", - label.TraefikFrontendRedirectEntryPoint: "https", - label.TraefikFrontendRedirectRegex: "nope", - label.TraefikFrontendRedirectReplacement: "nope", - label.TraefikFrontendRedirectPermanent: "true", - label.TraefikFrontendRule: "Host:traefik.io", - label.TraefikFrontendWhiteListSourceRange: "10.10.10.10", - label.TraefikFrontendWhiteListIPStrategyExcludedIPS: "10.10.10.10,10.10.10.11", - label.TraefikFrontendWhiteListIPStrategyDepth: "5", - - label.TraefikFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.TraefikFrontendAllowedHosts: "foo,bar,bor", - label.TraefikFrontendHostsProxyHeaders: "foo,bar,bor", - label.TraefikFrontendSSLHost: "foo", - label.TraefikFrontendCustomFrameOptionsValue: "foo", - label.TraefikFrontendContentSecurityPolicy: "foo", - label.TraefikFrontendPublicKey: "foo", - label.TraefikFrontendReferrerPolicy: "foo", - label.TraefikFrontendCustomBrowserXSSValue: "foo", - label.TraefikFrontendSTSSeconds: "666", - label.TraefikFrontendSSLForceHost: "true", - label.TraefikFrontendSSLRedirect: "true", - label.TraefikFrontendSSLTemporaryRedirect: "true", - label.TraefikFrontendSTSIncludeSubdomains: "true", - label.TraefikFrontendSTSPreload: "true", - label.TraefikFrontendForceSTSHeader: "true", - label.TraefikFrontendFrameDeny: "true", - label.TraefikFrontendContentTypeNosniff: "true", - label.TraefikFrontendBrowserXSSFilter: "true", - label.TraefikFrontendIsDevelopment: "true", - - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: "404", - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: "foo_query", - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: "500,600", - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: "bar_query", - - label.TraefikFrontendRateLimitExtractorFunc: "client.ip", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: "6", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: "12", - label.Prefix + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: "18", - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: "3", - label.Prefix + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: "6", - label.Prefix + label.BaseFrontendRateLimit + "bar." 
+ label.SuffixRateLimitBurst: "9", - }, - Health: "healthy", - Containers: []string{"10.0.0.1", "10.0.0.2"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-Host-traefik-io": { - EntryPoints: []string{ - "http", - "https", - }, - Backend: "backend-foobar", - Routes: map[string]types.Route{ - "route-frontend-Host-traefik-io": { - Rule: "Host:traefik.io", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - IPStrategy: &types.IPStrategy{ - Depth: 5, - ExcludedIPs: []string{"10.10.10.10", "10.10.10.11"}, - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{ - "foo", - "bar", - "bor", - }, - HostsProxyHeaders: []string{ - "foo", - "bar", - "bor", - }, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - Errors: map[string]*types.ErrorPage{ - "foo": { - Status: []string{"404"}, - Query: "foo_query", - Backend: "backend-foobar", - }, - "bar": { - Status: []string{"500", "600"}, - Query: "bar_query", - Backend: "backend-foobar", - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-foobar": { - Servers: map[string]types.Server{ - "server-0": { - URL: "https://10.0.0.1:666", - Weight: 12, - }, - "server-1": { - URL: "https://10.0.0.2:666", - Weight: 12, - }, - }, - CircuitBreaker: &types.CircuitBreaker{ - Expression: "NetworkErrorRatio() > 0.5", - }, - ResponseForwarding: &types.ResponseForwarding{ - FlushInterval: "10ms", - }, - LoadBalancer: 
&types.LoadBalancer{ - Method: "drr", - Stickiness: &types.Stickiness{ - CookieName: "chocolate", - }, - }, - MaxConn: &types.MaxConn{ - Amount: 666, - ExtractorFunc: "client.ip", - }, - HealthCheck: &types.HealthCheck{ - Scheme: "http", - Path: "/health", - Port: 880, - Interval: "6", - Timeout: "3", - Hostname: "foo.com", - Headers: map[string]string{ - "Foo": "bar", - "Bar": "foo", - }, - }, - Buffering: &types.Buffering{ - MaxResponseBodyBytes: 10485760, - MemResponseBodyBytes: 2097152, - MaxRequestBodyBytes: 10485760, - MemRequestBodyBytes: 2097152, - RetryExpression: "IsNetworkError() && Attempts() <= 2", - }, - }, - }, - }, - { - desc: "when all segment labels are set", - services: []rancherData{ - { - Labels: map[string]string{ - label.Prefix + "sauternes." + label.SuffixPort: "666", - label.Prefix + "sauternes." + label.SuffixProtocol: "https", - label.Prefix + "sauternes." + label.SuffixWeight: "12", - - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerCommonName: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerCountry: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerDomainComponent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerLocality: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerOrganization: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerProvince: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosIssuerSerialNumber: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertPem: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotAfter: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosNotBefore: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSans: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCommonName: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectCountry: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectDomainComponent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectLocality: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectOrganization: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectProvince: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSClientCertInfosSubjectSerialNumber: "true", - - label.Prefix + "sauternes." + label.SuffixFrontendRule: "Host:traefik.wtf", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicRemoveHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthBasicUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestRemoveHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.Prefix + "sauternes." + label.SuffixFrontendAuthDigestUsersFile: ".htpasswd", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardAddress: "auth.server", - label.Prefix + "sauternes." 
+ label.SuffixFrontendAuthForwardTrustForwardHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCa: "ca.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCaOptional: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSCert: "server.crt", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSKey: "server.key", - label.Prefix + "sauternes." + label.SuffixFrontendAuthForwardTLSInsecureSkipVerify: "true", - label.Prefix + "sauternes." + label.SuffixFrontendAuthHeaderField: "X-WebAuth-User", - - label.Prefix + "sauternes." + label.SuffixFrontendEntryPoints: "http,https", - label.Prefix + "sauternes." + label.SuffixFrontendPassHostHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPassTLSCert: "true", - label.Prefix + "sauternes." + label.SuffixFrontendPriority: "666", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectEntryPoint: "https", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectRegex: "nope", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectReplacement: "nope", - label.Prefix + "sauternes." + label.SuffixFrontendRedirectPermanent: "true", - label.Prefix + "sauternes." + label.SuffixFrontendWhiteListSourceRange: "10.10.10.10", - - label.Prefix + "sauternes." + label.SuffixFrontendRequestHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendResponseHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLProxyHeaders: "Access-Control-Allow-Methods:POST,GET,OPTIONS || Content-type: application/json; charset=utf-8", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersAllowedHosts: "foo,bar,bor", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersHostsProxyHeaders: "foo,bar,bor", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLHost: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomFrameOptionsValue: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentSecurityPolicy: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersPublicKey: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersReferrerPolicy: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersCustomBrowserXSSValue: "foo", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSSeconds: "666", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLForceHost: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLRedirect: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSSLTemporaryRedirect: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSIncludeSubdomains: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersSTSPreload: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersForceSTSHeader: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersFrameDeny: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersContentTypeNosniff: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersBrowserXSSFilter: "true", - label.Prefix + "sauternes." + label.SuffixFrontendHeadersIsDevelopment: "true", - - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageStatus: "404", - label.Prefix + "sauternes." 
+ label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "foo." + label.SuffixErrorPageQuery: "foo_query", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageStatus: "500,600", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageBackend: "foobar", - label.Prefix + "sauternes." + label.BaseFrontendErrorPage + "bar." + label.SuffixErrorPageQuery: "bar_query", - - label.Prefix + "sauternes." + label.SuffixFrontendRateLimitExtractorFunc: "client.ip", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitPeriod: "6", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitAverage: "12", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "foo." + label.SuffixRateLimitBurst: "18", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitPeriod: "3", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitAverage: "6", - label.Prefix + "sauternes." + label.BaseFrontendRateLimit + "bar." + label.SuffixRateLimitBurst: "9", - }, - Health: "healthy", - Containers: []string{"10.0.0.1", "10.0.0.2"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-sauternes": { - EntryPoints: []string{"http", "https"}, - Backend: "backend-sauternes", - Routes: map[string]types.Route{ - "route-frontend-sauternes": { - Rule: "Host:traefik.wtf", - }, - }, - PassHostHeader: true, - PassTLSCert: true, - Priority: 666, - PassTLSClientCert: &types.TLSClientHeaders{ - PEM: true, - Infos: &types.TLSClientCertificateInfos{ - NotBefore: true, - Sans: true, - NotAfter: true, - Subject: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - Issuer: &types.TLSCLientCertificateDNInfos{ - CommonName: true, - Country: true, - DomainComponent: true, - Locality: true, - Organization: true, - Province: true, - SerialNumber: true, - }, - }, - }, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Basic: &types.Basic{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - WhiteList: &types.WhiteList{ - SourceRange: []string{ - "10.10.10.10", - }, - }, - Headers: &types.Headers{ - CustomRequestHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - CustomResponseHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - AllowedHosts: []string{"foo", "bar", "bor"}, - HostsProxyHeaders: []string{"foo", "bar", "bor"}, - SSLRedirect: true, - SSLTemporaryRedirect: true, - SSLForceHost: true, - SSLHost: "foo", - SSLProxyHeaders: map[string]string{ - "Access-Control-Allow-Methods": "POST,GET,OPTIONS", - "Content-Type": "application/json; charset=utf-8", - }, - STSSeconds: 666, - STSIncludeSubdomains: true, - STSPreload: true, - ForceSTSHeader: true, - FrameDeny: true, - CustomFrameOptionsValue: "foo", - ContentTypeNosniff: true, - BrowserXSSFilter: true, - CustomBrowserXSSValue: "foo", - ContentSecurityPolicy: "foo", - PublicKey: "foo", - ReferrerPolicy: "foo", - IsDevelopment: true, - }, - 
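The `sauternes.`-prefixed keys above are segment labels: every option is scoped to a named segment of the service. A minimal sketch of how a `traefik.<segment>.<option>` key can be decomposed; the suffix list is a small illustrative subset, not the provider's real table, which is handled by label.ExtractTraefikLabels.

package main

import (
	"fmt"
	"strings"
)

// A small illustrative subset of option suffixes; the real provider keeps the
// full table and resolves keys through label.ExtractTraefikLabels.
var knownOptions = map[string]bool{
	"port":          true,
	"protocol":      true,
	"weight":        true,
	"frontend.rule": true,
}

// splitSegmentLabel decomposes "traefik.<segment>.<option>" into its segment
// and option parts; keys without a segment (e.g. "traefik.port") belong to
// the default, unnamed segment.
func splitSegmentLabel(key string) (segment, option string, ok bool) {
	rest := strings.TrimPrefix(key, "traefik.")
	if rest == key {
		return "", "", false // not a traefik label
	}
	if knownOptions[rest] {
		return "", rest, true // default segment
	}
	parts := strings.SplitN(rest, ".", 2)
	if len(parts) != 2 {
		return "", "", false
	}
	return parts[0], parts[1], true
}

func main() {
	seg, opt, _ := splitSegmentLabel("traefik.sauternes.frontend.rule")
	fmt.Println(seg, opt) // sauternes frontend.rule
}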
Errors: map[string]*types.ErrorPage{ - "bar": { - Status: []string{"500", "600"}, - Backend: "backend-foobar", - Query: "bar_query", - }, - "foo": { - Status: []string{"404"}, - Backend: "backend-foobar", - Query: "foo_query", - }, - }, - RateLimit: &types.RateLimit{ - ExtractorFunc: "client.ip", - RateSet: map[string]*types.Rate{ - "foo": { - Period: parse.Duration(6 * time.Second), - Average: 12, - Burst: 18, - }, - "bar": { - Period: parse.Duration(3 * time.Second), - Average: 6, - Burst: 9, - }, - }, - }, - Redirect: &types.Redirect{ - EntryPoint: "https", - Regex: "", - Replacement: "", - Permanent: true, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-sauternes": { - Servers: map[string]types.Server{ - "server-0": { - URL: "https://10.0.0.1:666", - Weight: 12, - }, - "server-1": { - URL: "https://10.0.0.2:666", - Weight: 12, - }, - }, - }, - }, - }, - { - desc: "with services", - services: []rancherData{ - { - Name: "test/service", - Labels: map[string]string{ - label.TraefikPort: "80", - label.TraefikFrontendAuthBasicUsers: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthBasicUsersFile: ".htpasswd", - label.TraefikFrontendRedirectEntryPoint: "https", - }, - Health: "healthy", - Containers: []string{"127.0.0.1"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-Host-test-service-rancher-localhost": { - Backend: "backend-test-service", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - Priority: 0, - Redirect: &types.Redirect{ - EntryPoint: "https", - }, - Routes: map[string]types.Route{ - "route-frontend-Host-test-service-rancher-localhost": { - Rule: "Host:test.service.rancher.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test-service": { - Servers: map[string]types.Server{ - "server-0": { - URL: "http://127.0.0.1:80", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "with basic auth backward compatibility", - services: []rancherData{ - { - Name: "test/service", - Labels: map[string]string{ - label.TraefikPort: "80", - label.TraefikFrontendAuthBasic: "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - }, - Health: "healthy", - Containers: []string{"127.0.0.1"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-Host-test-service-rancher-localhost": { - Backend: "backend-test-service", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - Basic: &types.Basic{ - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - }, - }, - Priority: 0, - Routes: map[string]types.Route{ - "route-frontend-Host-test-service-rancher-localhost": { - Rule: "Host:test.service.rancher.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test-service": { - Servers: map[string]types.Server{ - "server-0": { - URL: "http://127.0.0.1:80", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "with digest auth", - services: []rancherData{ - { - Name: "test/service", - Labels: map[string]string{ - label.TraefikPort: "80", - label.TraefikFrontendAuthDigestUsers: 
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0", - label.TraefikFrontendAuthDigestUsersFile: ".htpasswd", - label.TraefikFrontendAuthDigestRemoveHeader: "true", - }, - Health: "healthy", - Containers: []string{"127.0.0.1"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-Host-test-service-rancher-localhost": { - Backend: "backend-test-service", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - Digest: &types.Digest{ - RemoveHeader: true, - Users: []string{"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", - "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"}, - UsersFile: ".htpasswd", - }, - }, - Priority: 0, - Routes: map[string]types.Route{ - "route-frontend-Host-test-service-rancher-localhost": { - Rule: "Host:test.service.rancher.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test-service": { - Servers: map[string]types.Server{ - "server-0": { - URL: "http://127.0.0.1:80", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - { - desc: "with forward auth", - services: []rancherData{ - { - Name: "test/service", - Labels: map[string]string{ - label.TraefikPort: "80", - label.TraefikFrontendAuthForwardAddress: "auth.server", - label.TraefikFrontendAuthForwardTrustForwardHeader: "true", - label.TraefikFrontendAuthForwardTLSCa: "ca.crt", - label.TraefikFrontendAuthForwardTLSCaOptional: "true", - label.TraefikFrontendAuthForwardTLSCert: "server.crt", - label.TraefikFrontendAuthForwardTLSKey: "server.key", - label.TraefikFrontendAuthForwardTLSInsecureSkipVerify: "true", - label.TraefikFrontendAuthHeaderField: "X-WebAuth-User", - label.TraefikFrontendAuthForwardAuthResponseHeaders: "X-Auth-User,X-Auth-Token", - }, - Health: "healthy", - Containers: []string{"127.0.0.1"}, - }, - }, - expectedFrontends: map[string]*types.Frontend{ - "frontend-Host-test-service-rancher-localhost": { - Backend: "backend-test-service", - PassHostHeader: true, - EntryPoints: []string{}, - Auth: &types.Auth{ - HeaderField: "X-WebAuth-User", - Forward: &types.Forward{ - Address: "auth.server", - TLS: &types.ClientTLS{ - CA: "ca.crt", - CAOptional: true, - InsecureSkipVerify: true, - Cert: "server.crt", - Key: "server.key", - }, - TrustForwardHeader: true, - AuthResponseHeaders: []string{"X-Auth-User", "X-Auth-Token"}, - }, - }, - Priority: 0, - Routes: map[string]types.Route{ - "route-frontend-Host-test-service-rancher-localhost": { - Rule: "Host:test.service.rancher.localhost", - }, - }, - }, - }, - expectedBackends: map[string]*types.Backend{ - "backend-test-service": { - Servers: map[string]types.Server{ - "server-0": { - URL: "http://127.0.0.1:80", - Weight: label.DefaultWeight, - }, - }, - CircuitBreaker: nil, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actualConfig := provider.buildConfiguration(test.services) - require.NotNil(t, actualConfig) - - assert.EqualValues(t, test.expectedBackends, actualConfig.Backends) - assert.EqualValues(t, test.expectedFrontends, actualConfig.Frontends) - }) - } -} - -func TestProviderServiceFilter(t *testing.T) { - provider := &Provider{ - Domain: "rancher.localhost", - EnableServiceHealthFilter: true, - } - - constraint, _ := types.NewConstraint("tag==ch*se") - provider.Constraints = types.Constraints{constraint} - - testCases := []struct { - desc string - service rancherData - expected bool - }{ - { - desc: "missing Port labels, don't respect 
constraint", - service: rancherData{ - Labels: map[string]string{ - label.TraefikEnable: "true", - }, - Health: "healthy", - State: "active", - }, - expected: false, - }, - { - desc: "don't respect constraint", - service: rancherData{ - Labels: map[string]string{ - label.TraefikPort: "80", - label.TraefikEnable: "false", - }, - Health: "healthy", - State: "active", - }, - expected: false, - }, - { - desc: "unhealthy", - service: rancherData{ - Labels: map[string]string{ - label.TraefikTags: "cheese", - label.TraefikPort: "80", - label.TraefikEnable: "true", - }, - Health: "unhealthy", - State: "active", - }, - expected: false, - }, - { - desc: "inactive", - service: rancherData{ - Labels: map[string]string{ - label.TraefikTags: "not-cheesy", - label.TraefikPort: "80", - label.TraefikEnable: "true", - }, - Health: "healthy", - State: "inactive", - }, - expected: false, - }, - { - desc: "healthy & active, tag: cheese", - service: rancherData{ - Labels: map[string]string{ - label.TraefikTags: "cheese", - label.TraefikPort: "80", - label.TraefikEnable: "true", - }, - Health: "healthy", - State: "active", - }, - expected: true, - }, - { - desc: "healthy & active, tag: chose", - service: rancherData{ - Labels: map[string]string{ - label.TraefikTags: "chose", - label.TraefikPort: "80", - label.TraefikEnable: "true", - }, - Health: "healthy", - State: "active", - }, - expected: true, - }, - { - desc: "healthy & upgraded", - service: rancherData{ - Labels: map[string]string{ - label.TraefikTags: "cheeeeese", - label.TraefikPort: "80", - label.TraefikEnable: "true", - }, - Health: "healthy", - State: "upgraded", - }, - expected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := provider.serviceFilter(test.service) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestContainerFilter(t *testing.T) { - testCases := []struct { - name string - healthState string - state string - expected bool - }{ - { - healthState: "unhealthy", - state: "running", - expected: false, - }, - { - healthState: "healthy", - state: "stopped", - expected: false, - }, - { - state: "stopped", - expected: false, - }, - { - healthState: "healthy", - state: "running", - expected: true, - }, - { - healthState: "updating-healthy", - state: "updating-running", - expected: true, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.healthState+" "+test.state, func(t *testing.T) { - t.Parallel() - - actual := containerFilter(test.name, test.healthState, test.state) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetFrontendName(t *testing.T) { - provider := &Provider{Domain: "rancher.localhost"} - - testCases := []struct { - desc string - service rancherData - expected string - }{ - { - desc: "default", - service: rancherData{ - Name: "foo", - }, - expected: "Host-foo-rancher-localhost", - }, - { - desc: "with Headers label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikFrontendRule: "Headers:User-Agent,bat/0.1.0", - }, - }, - expected: "Headers-User-Agent-bat-0-1-0", - }, - { - desc: "with Host label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikFrontendRule: "Host:foo.bar", - }, - }, - expected: "Host-foo-bar", - }, - { - desc: "with Path label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikFrontendRule: "Path:/test", - }, - }, - expected: "Path-test", - 
}, - { - desc: "with PathPrefix label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikFrontendRule: "PathPrefix:/test2", - }, - }, - expected: "PathPrefix-test2", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - segmentProperties := label.ExtractTraefikLabels(test.service.Labels) - test.service.SegmentLabels = segmentProperties[""] - - actual := provider.getFrontendName(test.service) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestProviderGetFrontendRule(t *testing.T) { - provider := &Provider{Domain: "rancher.localhost"} - - testCases := []struct { - desc string - service rancherData - expected string - }{ - { - desc: "host", - service: rancherData{ - Name: "foo", - }, - expected: "Host:foo.rancher.localhost", - }, - { - desc: "with domain label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikDomain: "traefik.localhost", - }, - }, - expected: "Host:test-service.traefik.localhost", - }, - { - desc: "host with /", - service: rancherData{ - Name: "foo/bar", - }, - expected: "Host:foo.bar.rancher.localhost", - }, - { - desc: "with Host label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikFrontendRule: "Host:foo.bar.com", - }, - }, - expected: "Host:foo.bar.com", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - segmentProperties := label.ExtractTraefikLabels(test.service.Labels) - test.service.SegmentLabels = segmentProperties[""] - - actual := provider.getFrontendRule(test.service.Name, test.service.SegmentLabels) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetBackendName(t *testing.T) { - testCases := []struct { - desc string - service rancherData - expected string - }{ - { - desc: "without label", - service: rancherData{ - Name: "test-service", - }, - expected: "test-service", - }, - { - desc: "with label", - service: rancherData{ - Name: "test-service", - Labels: map[string]string{ - label.TraefikBackend: "foobar", - }, - }, - - expected: "foobar", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - segmentProperties := label.ExtractTraefikLabels(test.service.Labels) - test.service.SegmentLabels = segmentProperties[""] - - actual := getBackendName(test.service) - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestGetServers(t *testing.T) { - testCases := []struct { - desc string - service rancherData - expected map[string]types.Server - }{ - { - desc: "should return nil when no server labels", - service: rancherData{ - Labels: map[string]string{}, - Health: "healthy", - State: "active", - }, - expected: nil, - }, - { - desc: "should return nil when no server IPs", - service: rancherData{ - Labels: map[string]string{ - label.TraefikWeight: "7", - }, - Containers: []string{}, - Health: "healthy", - State: "active", - }, - expected: nil, - }, - { - desc: "should return nil when no server IPs", - service: rancherData{ - Labels: map[string]string{ - label.TraefikWeight: "7", - }, - Containers: []string{""}, - Health: "healthy", - State: "active", - }, - expected: nil, - }, - { - desc: "should use default weight when invalid weight value", - service: rancherData{ - Labels: map[string]string{ - label.TraefikWeight: "kls", - }, - Containers: []string{"10.10.10.0"}, - Health: "healthy", - State: "active", - 
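The expected names in the frontend-name cases above are the frontend rules with every run of non-alphanumeric characters collapsed to a single dash. A self-contained sketch of that transformation; normalizeName is an illustrative stand-in, not the provider's own helper.

package main

import (
	"fmt"
	"regexp"
)

// Any run of characters outside [a-zA-Z0-9] becomes a single dash.
var nonAlphaNum = regexp.MustCompile(`[^a-zA-Z0-9]+`)

func normalizeName(rule string) string {
	return nonAlphaNum.ReplaceAllString(rule, "-")
}

func main() {
	fmt.Println(normalizeName("Host:foo.bar"))                 // Host-foo-bar
	fmt.Println(normalizeName("Path:/test"))                   // Path-test
	fmt.Println(normalizeName("Headers:User-Agent,bat/0.1.0")) // Headers-User-Agent-bat-0-1-0
}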
}, - expected: map[string]types.Server{ - "server-0": { - URL: "http://10.10.10.0:", - Weight: label.DefaultWeight, - }, - }, - }, - { - desc: "should return a map when configuration keys are defined", - service: rancherData{ - Labels: map[string]string{ - label.TraefikWeight: "6", - }, - Containers: []string{"10.10.10.0", "10.10.10.1"}, - Health: "healthy", - State: "active", - }, - expected: map[string]types.Server{ - "server-0": { - URL: "http://10.10.10.0:", - Weight: 6, - }, - "server-1": { - URL: "http://10.10.10.1:", - Weight: 6, - }, - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - segmentProperties := label.ExtractTraefikLabels(test.service.Labels) - test.service.SegmentLabels = segmentProperties[""] - - actual := getServers(test.service) - assert.Equal(t, test.expected, actual) - }) - } -} diff --git a/old/provider/rancher/metadata.go b/old/provider/rancher/metadata.go deleted file mode 100644 index 26404a07c..000000000 --- a/old/provider/rancher/metadata.go +++ /dev/null @@ -1,138 +0,0 @@ -package rancher - -import ( - "context" - "fmt" - "time" - - "github.com/cenkalti/backoff" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/job" - "github.com/containous/traefik/pkg/safe" - "github.com/sirupsen/logrus" - - rancher "github.com/rancher/go-rancher-metadata/metadata" -) - -// MetadataConfiguration contains configuration properties specific to -// the Rancher metadata service provider. -type MetadataConfiguration struct { - IntervalPoll bool `description:"Poll the Rancher metadata service every 'rancher.refreshseconds' (less accurate)"` - Prefix string `description:"Prefix used for accessing the Rancher metadata service"` -} - -func (p *Provider) metadataProvide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Metadata.Prefix) - - safe.Go(func() { - operation := func() error { - client, err := rancher.NewClientAndWait(metadataServiceURL) - if err != nil { - log.Errorf("Failed to create Rancher metadata service client: %v", err) - return err - } - - updateConfiguration := func(version string) { - log.WithField("metadata_version", version).Debugln("Refreshing configuration from Rancher metadata service") - - stacks, err := client.GetStacks() - if err != nil { - log.Errorf("Failed to query Rancher metadata service: %v", err) - return - } - - rancherData := parseMetadataSourcedRancherData(stacks) - configuration := p.buildConfiguration(rancherData) - configurationChan <- types.ConfigMessage{ - ProviderName: "rancher", - Configuration: configuration, - } - } - updateConfiguration("init") - - if p.Watch { - pool.Go(func(stop chan bool) { - switch { - case p.Metadata.IntervalPoll: - p.intervalPoll(client, updateConfiguration, stop) - default: - p.longPoll(client, updateConfiguration, stop) - } - }) - } - return nil - } - - notify := func(err error, time time.Duration) { - log.WithFields(logrus.Fields{ - "error": err, - "retry_in": time, - }).Errorln("Rancher metadata service connection error") - } - - if err := backoff.RetryNotify(operation, job.NewBackOff(backoff.NewExponentialBackOff()), notify); err != nil { - log.WithField("endpoint", metadataServiceURL).Errorln("Cannot connect to Rancher metadata service") - } - }) - - return nil -} - -func (p *Provider) intervalPoll(client rancher.Client, updateConfiguration func(string), stop 
chan bool) { - _, cancel := context.WithCancel(context.Background()) - defer cancel() - - ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds)) - defer ticker.Stop() - - var version string - for { - select { - case <-ticker.C: - newVersion, err := client.GetVersion() - if err != nil { - log.WithField("error", err).Errorln("Failed to read Rancher metadata service version") - } else if version != newVersion { - version = newVersion - updateConfiguration(version) - } - case <-stop: - return - } - } -} - -func (p *Provider) longPoll(client rancher.Client, updateConfiguration func(string), stop chan bool) { - _, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Holds the connection until there is either a change in the metadata - // repository or `p.RefreshSeconds` has elapsed. Long polling should be - // favored for the most accurate configuration updates. - safe.Go(func() { - client.OnChange(p.RefreshSeconds, updateConfiguration) - }) - <-stop -} - -func parseMetadataSourcedRancherData(stacks []rancher.Stack) (rancherDataList []rancherData) { - for _, stack := range stacks { - for _, service := range stack.Services { - var containerIPAddresses []string - for _, container := range service.Containers { - if containerFilter(container.Name, container.HealthState, container.State) { - containerIPAddresses = append(containerIPAddresses, container.PrimaryIp) - } - } - - rancherDataList = append(rancherDataList, rancherData{ - Name: service.Name + "/" + stack.Name, - State: service.State, - Labels: service.Labels, - Containers: containerIPAddresses, - }) - } - } - return rancherDataList -} diff --git a/old/provider/rancher/rancher.go b/old/provider/rancher/rancher.go deleted file mode 100644 index 22fa52840..000000000 --- a/old/provider/rancher/rancher.go +++ /dev/null @@ -1,80 +0,0 @@ -package rancher - -import ( - "fmt" - - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -const ( - // Health - healthy = "healthy" - updatingHealthy = "updating-healthy" - - // State - active = "active" - running = "running" - upgraded = "upgraded" - upgrading = "upgrading" - updatingActive = "updating-active" - updatingRunning = "updating-running" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the provider. 
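metadataProvide above wraps the client bootstrap in backoff.RetryNotify so transient metadata-service outages are retried with exponential backoff. A minimal standalone sketch of that pattern, using the library's stock NewExponentialBackOff rather than Traefik's job.NewBackOff wrapper.

package main

import (
	"errors"
	"log"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	attempts := 0

	// operation is retried until it returns nil or the back-off gives up.
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("metadata service not reachable yet")
		}
		return nil
	}

	// notify is called after every failed attempt with the next retry delay.
	notify := func(err error, next time.Duration) {
		log.Printf("connection error: %v, retrying in %s", err, next)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatalf("giving up: %v", err)
	}
	log.Printf("connected after %d attempts", attempts)
}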
-type Provider struct { - provider.BaseProvider `mapstructure:",squash" export:"true"` - APIConfiguration `mapstructure:",squash" export:"true"` // Provide backwards compatibility - API *APIConfiguration `description:"Enable the Rancher API provider" export:"true"` - Metadata *MetadataConfiguration `description:"Enable the Rancher metadata service provider" export:"true"` - Domain string `description:"Default domain used"` - RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"` - ExposedByDefault bool `description:"Expose services by default" export:"true"` - EnableServiceHealthFilter bool `description:"Filter services with unhealthy states and inactive states" export:"true"` -} - -type rancherData struct { - Name string - Labels map[string]string // List of labels set to container or service - Containers []string - Health string - State string - SegmentLabels map[string]string - SegmentName string -} - -func (r rancherData) String() string { - return fmt.Sprintf("{name:%s, labels:%v, containers: %v, health: %s, state: %s}", r.Name, r.Labels, r.Containers, r.Health, r.State) -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - return p.BaseProvider.Init(constraints) -} - -// Provide allows either the Rancher API or metadata service provider to -// seed configuration into Traefik using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - if p.Metadata == nil { - return p.apiProvide(configurationChan, pool) - } - return p.metadataProvide(configurationChan, pool) -} - -func containerFilter(name, healthState, state string) bool { - if healthState != "" && healthState != healthy && healthState != updatingHealthy { - log.Debugf("Filtering container %s with healthState of %s", name, healthState) - return false - } - - if state != "" && state != running && state != updatingRunning && state != upgraded { - log.Debugf("Filtering container %s with state of %s", name, state) - return false - } - - return true -} diff --git a/old/provider/rest/rest.go b/old/provider/rest/rest.go deleted file mode 100644 index 4f3a1afb6..000000000 --- a/old/provider/rest/rest.go +++ /dev/null @@ -1,68 +0,0 @@ -package rest - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - "github.com/containous/mux" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" - "github.com/unrolled/render" -) - -// Provider is a provider.Provider implementation that provides a Rest API -type Provider struct { - configurationChan chan<- types.ConfigMessage - EntryPoint string `description:"EntryPoint" export:"true"` -} - -var templatesRenderer = render.New(render.Options{Directory: "nowhere"}) - -// Init the provider -func (p *Provider) Init(_ types.Constraints) error { - return nil -} - -// AddRoutes add rest provider routes on a router -func (p *Provider) AddRoutes(systemRouter *mux.Router) { - systemRouter. - Methods(http.MethodPut). - Path("/api/providers/{provider}"). 
- HandlerFunc(func(response http.ResponseWriter, request *http.Request) { - - vars := mux.Vars(request) - // TODO: Deprecated configuration - Need to be removed in the future - if vars["provider"] != "web" && vars["provider"] != "rest" { - response.WriteHeader(http.StatusBadRequest) - fmt.Fprint(response, "Only 'rest' provider can be updated through the REST API") - return - } else if vars["provider"] == "web" { - log.Warn("The provider web is deprecated. Please use /rest instead") - } - - configuration := new(types.Configuration) - body, _ := ioutil.ReadAll(request.Body) - err := json.Unmarshal(body, configuration) - if err == nil { - // TODO: Deprecated configuration - Change to `rest` in the future - p.configurationChan <- types.ConfigMessage{ProviderName: "web", Configuration: configuration} - err := templatesRenderer.JSON(response, http.StatusOK, configuration) - if err != nil { - log.Error(err) - } - } else { - log.Errorf("Error parsing configuration %+v", err) - http.Error(response, fmt.Sprintf("%+v", err), http.StatusBadRequest) - } - }) -} - -// Provide allows the provider to provide configurations to traefik -// using the given configuration channel. -func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - p.configurationChan = configurationChan - return nil -} diff --git a/old/provider/zk/zk.go b/old/provider/zk/zk.go deleted file mode 100644 index 4eea8676f..000000000 --- a/old/provider/zk/zk.go +++ /dev/null @@ -1,48 +0,0 @@ -package zk - -import ( - "fmt" - - "github.com/abronan/valkeyrie/store" - "github.com/abronan/valkeyrie/store/zookeeper" - "github.com/containous/traefik/old/provider" - "github.com/containous/traefik/old/provider/kv" - "github.com/containous/traefik/old/types" - "github.com/containous/traefik/pkg/safe" -) - -var _ provider.Provider = (*Provider)(nil) - -// Provider holds configurations of the provider. -type Provider struct { - kv.Provider `mapstructure:",squash" export:"true"` -} - -// Init the provider -func (p *Provider) Init(constraints types.Constraints) error { - err := p.Provider.Init(constraints) - if err != nil { - return err - } - - store, err := p.CreateStore() - if err != nil { - return fmt.Errorf("failed to Connect to KV store: %v", err) - } - - p.SetKVClient(store) - return nil -} - -// Provide allows the zk provider to Provide configurations to traefik -// using the given configuration channel. 
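The handler above only accepts PUT requests for the rest provider (web is kept as a deprecated alias) with a JSON types.Configuration body. A rough sketch of pushing a minimal dynamic configuration to it; the listen address and the payload shape are assumptions for illustration.

package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed API address; the payload keys mirror the v1 dynamic configuration.
	payload := `{"frontends":{},"backends":{}}`

	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:8080/api/providers/rest",
		strings.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
}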
-func (p *Provider) Provide(configurationChan chan<- types.ConfigMessage, pool *safe.Pool) error { - return p.Provider.Provide(configurationChan, pool) -} - -// CreateStore creates the KV store -func (p *Provider) CreateStore() (store.Store, error) { - p.SetStoreType(store.ZK) - zookeeper.Register() - return p.Provider.CreateStore() -} diff --git a/old/tls/certificate.go b/old/tls/certificate.go deleted file mode 100644 index 7ec9060bb..000000000 --- a/old/tls/certificate.go +++ /dev/null @@ -1,244 +0,0 @@ -package tls - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - "sort" - "strings" - - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/tls/generate" -) - -var ( - // MinVersion Map of allowed TLS minimum versions - MinVersion = map[string]uint16{ - `VersionTLS10`: tls.VersionTLS10, - `VersionTLS11`: tls.VersionTLS11, - `VersionTLS12`: tls.VersionTLS12, - } - - // CipherSuites Map of TLS CipherSuites from crypto/tls - // Available CipherSuites defined at https://golang.org/pkg/crypto/tls/#pkg-constants - CipherSuites = map[string]uint16{ - `TLS_RSA_WITH_RC4_128_SHA`: tls.TLS_RSA_WITH_RC4_128_SHA, - `TLS_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - `TLS_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_RSA_WITH_AES_128_CBC_SHA, - `TLS_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_RSA_WITH_AES_256_CBC_SHA, - `TLS_RSA_WITH_AES_128_CBC_SHA256`: tls.TLS_RSA_WITH_AES_128_CBC_SHA256, - `TLS_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - `TLS_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - `TLS_ECDHE_ECDSA_WITH_RC4_128_SHA`: tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - `TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA`: tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - `TLS_ECDHE_RSA_WITH_RC4_128_SHA`: tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - `TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - `TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA`: tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - `TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, - `TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256`: tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, - `TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256`: tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - `TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256`: tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - `TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384`: tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - `TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384`: tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - `TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305`: tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, - `TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305`: tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, - } -) - -// Certificate holds a SSL cert/key pair -// Certs and Key could be either a file path, or the file content itself -type Certificate struct { - CertFile FileOrContent - KeyFile FileOrContent -} - -// Certificates defines traefik certificates type -// Certs and Keys could be either a file path, or the file content itself -type Certificates []Certificate - -// FileOrContent hold a file path or content -type FileOrContent string - -func (f FileOrContent) String() string { - return string(f) -} - -// IsPath returns true if the FileOrContent is a file path, otherwise returns false -func (f FileOrContent) IsPath() bool { - _, err := os.Stat(f.String()) - return err == nil -} - -func (f 
FileOrContent) Read() ([]byte, error) { - var content []byte - if _, err := os.Stat(f.String()); err == nil { - content, err = ioutil.ReadFile(f.String()) - if err != nil { - return nil, err - } - } else { - content = []byte(f) - } - return content, nil -} - -// CreateTLSConfig creates a TLS config from Certificate structures -func (c *Certificates) CreateTLSConfig(entryPointName string) (*tls.Config, error) { - config := &tls.Config{} - domainsCertificates := make(map[string]map[string]*tls.Certificate) - - if c.isEmpty() { - config.Certificates = []tls.Certificate{} - - cert, err := generate.DefaultCertificate() - if err != nil { - return nil, err - } - - config.Certificates = append(config.Certificates, *cert) - } else { - for _, certificate := range *c { - err := certificate.AppendCertificates(domainsCertificates, entryPointName) - if err != nil { - log.Errorf("Unable to add a certificate to the entryPoint %q : %v", entryPointName, err) - continue - } - - for _, certDom := range domainsCertificates { - for _, cert := range certDom { - config.Certificates = append(config.Certificates, *cert) - } - } - } - } - return config, nil -} - -// isEmpty checks if the certificates list is empty -func (c *Certificates) isEmpty() bool { - if len(*c) == 0 { - return true - } - var key int - for _, cert := range *c { - if len(cert.CertFile.String()) != 0 && len(cert.KeyFile.String()) != 0 { - break - } - key++ - } - return key == len(*c) -} - -// AppendCertificates appends a Certificate to a certificates map sorted by entrypoints -func (c *Certificate) AppendCertificates(certs map[string]map[string]*tls.Certificate, ep string) error { - - certContent, err := c.CertFile.Read() - if err != nil { - return fmt.Errorf("unable to read CertFile : %v", err) - } - - keyContent, err := c.KeyFile.Read() - if err != nil { - return fmt.Errorf("unable to read KeyFile : %v", err) - } - tlsCert, err := tls.X509KeyPair(certContent, keyContent) - if err != nil { - return fmt.Errorf("unable to generate TLS certificate : %v", err) - } - - parsedCert, _ := x509.ParseCertificate(tlsCert.Certificate[0]) - - var SANs []string - if parsedCert.Subject.CommonName != "" { - SANs = append(SANs, parsedCert.Subject.CommonName) - } - if parsedCert.DNSNames != nil { - sort.Strings(parsedCert.DNSNames) - for _, dnsName := range parsedCert.DNSNames { - if dnsName != parsedCert.Subject.CommonName { - SANs = append(SANs, dnsName) - } - } - - } - if parsedCert.IPAddresses != nil { - for _, ip := range parsedCert.IPAddresses { - if ip.String() != parsedCert.Subject.CommonName { - SANs = append(SANs, ip.String()) - } - } - - } - certKey := strings.Join(SANs, ",") - - certExists := false - if certs[ep] == nil { - certs[ep] = make(map[string]*tls.Certificate) - } else { - for domains := range certs[ep] { - if domains == certKey { - certExists = true - break - } - } - } - if certExists { - log.Warnf("Into EntryPoint %s, try to add certificate for domains which already have this certificate (%s). 
The new certificate will not be append to the EntryPoint.", ep, certKey) - } else { - log.Debugf("Add certificate for domains %s", certKey) - certs[ep][certKey] = &tlsCert - } - - return err -} - -func (c *Certificate) getTruncatedCertificateName() string { - certName := c.CertFile.String() - - // Truncate certificate information only if it's a well formed certificate content with more than 50 characters - if !c.CertFile.IsPath() && strings.HasPrefix(certName, certificateHeader) && len(certName) > len(certificateHeader)+50 { - certName = strings.TrimPrefix(c.CertFile.String(), certificateHeader)[:50] - } - - return certName -} - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (c *Certificates) String() string { - if len(*c) == 0 { - return "" - } - var result []string - for _, certificate := range *c { - result = append(result, certificate.CertFile.String()+","+certificate.KeyFile.String()) - } - return strings.Join(result, ";") -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. -func (c *Certificates) Set(value string) error { - certificates := strings.Split(value, ";") - for _, certificate := range certificates { - files := strings.Split(certificate, ",") - if len(files) != 2 { - return fmt.Errorf("bad certificates format: %s", value) - } - *c = append(*c, Certificate{ - CertFile: FileOrContent(files[0]), - KeyFile: FileOrContent(files[1]), - }) - } - return nil -} - -// Type is type of the struct -func (c *Certificates) Type() string { - return "certificates" -} diff --git a/old/tls/certificate_store.go b/old/tls/certificate_store.go deleted file mode 100644 index 03caa4e58..000000000 --- a/old/tls/certificate_store.go +++ /dev/null @@ -1,137 +0,0 @@ -package tls - -import ( - "crypto/tls" - "net" - "sort" - "strings" - "time" - - "github.com/containous/traefik/pkg/log" - "github.com/containous/traefik/pkg/safe" - "github.com/patrickmn/go-cache" -) - -// CertificateStore store for dynamic and static certificates -type CertificateStore struct { - DynamicCerts *safe.Safe - StaticCerts *safe.Safe - DefaultCertificate *tls.Certificate - CertCache *cache.Cache - SniStrict bool -} - -// NewCertificateStore create a store for dynamic and static certificates -func NewCertificateStore() *CertificateStore { - return &CertificateStore{ - StaticCerts: &safe.Safe{}, - DynamicCerts: &safe.Safe{}, - CertCache: cache.New(1*time.Hour, 10*time.Minute), - } -} - -// GetAllDomains return a slice with all the certificate domain -func (c CertificateStore) GetAllDomains() []string { - var allCerts []string - - // Get static certificates - if c.StaticCerts != nil && c.StaticCerts.Get() != nil { - for domains := range c.StaticCerts.Get().(map[string]*tls.Certificate) { - allCerts = append(allCerts, domains) - } - } - - // Get dynamic certificates - if c.DynamicCerts != nil && c.DynamicCerts.Get() != nil { - for domains := range c.DynamicCerts.Get().(map[string]*tls.Certificate) { - allCerts = append(allCerts, domains) - } - } - return allCerts -} - -// GetBestCertificate returns the best match certificate, and caches the response -func (c CertificateStore) GetBestCertificate(clientHello *tls.ClientHelloInfo) *tls.Certificate { - domainToCheck := strings.ToLower(strings.TrimSpace(clientHello.ServerName)) - if len(domainToCheck) == 0 { - // If no ServerName is provided, 
Check for local IP address matches - host, _, err := net.SplitHostPort(clientHello.Conn.LocalAddr().String()) - if err != nil { - log.Debugf("Could not split host/port: %v", err) - } - domainToCheck = strings.TrimSpace(host) - } - - if cert, ok := c.CertCache.Get(domainToCheck); ok { - return cert.(*tls.Certificate) - } - - matchedCerts := map[string]*tls.Certificate{} - if c.DynamicCerts != nil && c.DynamicCerts.Get() != nil { - for domains, cert := range c.DynamicCerts.Get().(map[string]*tls.Certificate) { - for _, certDomain := range strings.Split(domains, ",") { - if MatchDomain(domainToCheck, certDomain) { - matchedCerts[certDomain] = cert - } - } - } - } - - if c.StaticCerts != nil && c.StaticCerts.Get() != nil { - for domains, cert := range c.StaticCerts.Get().(map[string]*tls.Certificate) { - for _, certDomain := range strings.Split(domains, ",") { - if MatchDomain(domainToCheck, certDomain) { - matchedCerts[certDomain] = cert - } - } - } - } - - if len(matchedCerts) > 0 { - // sort map by keys - keys := make([]string, 0, len(matchedCerts)) - for k := range matchedCerts { - keys = append(keys, k) - } - sort.Strings(keys) - - // cache best match - c.CertCache.SetDefault(domainToCheck, matchedCerts[keys[len(keys)-1]]) - return matchedCerts[keys[len(keys)-1]] - } - - return nil -} - -// ContainsCertificates checks if there are any certs in the store -func (c CertificateStore) ContainsCertificates() bool { - return c.StaticCerts.Get() != nil || c.DynamicCerts.Get() != nil -} - -// ResetCache clears the cache in the store -func (c CertificateStore) ResetCache() { - if c.CertCache != nil { - c.CertCache.Flush() - } -} - -// MatchDomain return true if a domain match the cert domain -func MatchDomain(domain string, certDomain string) bool { - if domain == certDomain { - return true - } - - for len(certDomain) > 0 && certDomain[len(certDomain)-1] == '.' 
{ - certDomain = certDomain[:len(certDomain)-1] - } - - labels := strings.Split(domain, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if certDomain == candidate { - return true - } - } - return false -} diff --git a/old/tls/generate/generate.go b/old/tls/generate/generate.go deleted file mode 100644 index 91d73a731..000000000 --- a/old/tls/generate/generate.go +++ /dev/null @@ -1,94 +0,0 @@ -package generate - -import ( - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/hex" - "encoding/pem" - "fmt" - "math/big" - "time" -) - -// DefaultDomain Traefik domain for the default certificate -const DefaultDomain = "TRAEFIK DEFAULT CERT" - -// DefaultCertificate generates random TLS certificates -func DefaultCertificate() (*tls.Certificate, error) { - randomBytes := make([]byte, 100) - _, err := rand.Read(randomBytes) - if err != nil { - return nil, err - } - zBytes := sha256.Sum256(randomBytes) - z := hex.EncodeToString(zBytes[:sha256.Size]) - domain := fmt.Sprintf("%s.%s.traefik.default", z[:32], z[32:]) - - certPEM, keyPEM, err := KeyPair(domain, time.Time{}) - if err != nil { - return nil, err - } - - certificate, err := tls.X509KeyPair(certPEM, keyPEM) - if err != nil { - return nil, err - } - - return &certificate, nil -} - -// KeyPair generates cert and key files -func KeyPair(domain string, expiration time.Time) ([]byte, []byte, error) { - rsaPrivKey, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, nil, err - } - keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rsaPrivKey)}) - - certPEM, err := PemCert(rsaPrivKey, domain, expiration) - if err != nil { - return nil, nil, err - } - return certPEM, keyPEM, nil -} - -// PemCert generates PEM cert file -func PemCert(privKey *rsa.PrivateKey, domain string, expiration time.Time) ([]byte, error) { - derBytes, err := derCert(privKey, expiration, domain) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}), nil -} - -func derCert(privKey *rsa.PrivateKey, expiration time.Time, domain string) ([]byte, error) { - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) - if err != nil { - return nil, err - } - - if expiration.IsZero() { - expiration = time.Now().Add(365 * (24 * time.Hour)) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: DefaultDomain, - }, - NotBefore: time.Now(), - NotAfter: expiration, - - KeyUsage: x509.KeyUsageKeyEncipherment, - BasicConstraintsValid: true, - DNSNames: []string{domain}, - } - - return x509.CreateCertificate(rand.Reader, &template, &template, &privKey.PublicKey, privKey) -} diff --git a/old/tls/tls.go b/old/tls/tls.go deleted file mode 100644 index dbda7c3fc..000000000 --- a/old/tls/tls.go +++ /dev/null @@ -1,101 +0,0 @@ -package tls - -import ( - "crypto/tls" - "fmt" - "strings" - - "github.com/containous/traefik/pkg/log" - "github.com/sirupsen/logrus" -) - -const ( - certificateHeader = "-----BEGIN CERTIFICATE-----\n" -) - -// ClientCA defines traefik CA files for a entryPoint -// and it indicates if they are mandatory or have just to be analyzed if provided -type ClientCA struct { - Files FilesOrContents - Optional bool -} - -// TLS configures TLS for an entry point -type TLS struct { - MinVersion string `export:"true"` - CipherSuites []string - 
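GetBestCertificate above is the kind of lookup that typically sits behind crypto/tls's GetCertificate hook: pick a certificate whose (possibly wildcarded) domain matches the SNI name. A reduced sketch of wiring such a lookup into a tls.Config; the certificate map and helper are placeholders and omit the caching and static/dynamic split of the real store.

package main

import (
	"crypto/tls"
	"strings"
)

// certsByDomain is a placeholder for the store's domain -> certificate map;
// in the real store the keys may be comma-separated SAN lists.
var certsByDomain = map[string]*tls.Certificate{}

// bestCertificate returns a certificate for the requested SNI name, with a
// simple single-level wildcard fallback.
func bestCertificate(serverName string) *tls.Certificate {
	name := strings.ToLower(strings.TrimSpace(serverName))
	if cert, ok := certsByDomain[name]; ok {
		return cert
	}
	// Wildcard: "*.example.com" also serves "www.example.com".
	if i := strings.Index(name, "."); i > 0 {
		if cert, ok := certsByDomain["*"+name[i:]]; ok {
			return cert
		}
	}
	return nil
}

func main() {
	cfg := &tls.Config{
		GetCertificate: func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
			if cert := bestCertificate(hello.ServerName); cert != nil {
				return cert, nil
			}
			return nil, nil // fall back to cfg.Certificates
		},
	}
	_ = cfg
}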
Certificates Certificates - ClientCA ClientCA - DefaultCertificate *Certificate - SniStrict bool `export:"true"` -} - -// FilesOrContents hold the CA we want to have in root -type FilesOrContents []FileOrContent - -// Configuration allows mapping a TLS certificate to a list of entrypoints -type Configuration struct { - EntryPoints []string - Certificate *Certificate -} - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (r *FilesOrContents) String() string { - sliceOfString := make([]string, len([]FileOrContent(*r))) - for key, value := range *r { - sliceOfString[key] = value.String() - } - return strings.Join(sliceOfString, ",") -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. -func (r *FilesOrContents) Set(value string) error { - filesOrContents := strings.Split(value, ",") - if len(filesOrContents) == 0 { - return fmt.Errorf("bad FilesOrContents format: %s", value) - } - for _, fileOrContent := range filesOrContents { - *r = append(*r, FileOrContent(fileOrContent)) - } - return nil -} - -// Get return the FilesOrContents list -func (r *FilesOrContents) Get() interface{} { - return *r -} - -// SetValue sets the FilesOrContents with val -func (r *FilesOrContents) SetValue(val interface{}) { - *r = val.(FilesOrContents) -} - -// Type is type of the struct -func (r *FilesOrContents) Type() string { - return "filesorcontents" -} - -// SortTLSPerEntryPoints converts TLS configuration sorted by Certificates into TLS configuration sorted by EntryPoints -func SortTLSPerEntryPoints(configurations []*Configuration, epConfiguration map[string]map[string]*tls.Certificate, defaultEntryPoints []string) { - if epConfiguration == nil { - epConfiguration = make(map[string]map[string]*tls.Certificate) - } - for _, conf := range configurations { - if conf.EntryPoints == nil || len(conf.EntryPoints) == 0 { - if log.GetLevel() >= logrus.DebugLevel { - log.Debugf("No entryPoint is defined to add the certificate %s, it will be added to the default entryPoints: %s", - conf.Certificate.getTruncatedCertificateName(), - strings.Join(defaultEntryPoints, ", ")) - } - conf.EntryPoints = append(conf.EntryPoints, defaultEntryPoints...) - } - for _, ep := range conf.EntryPoints { - if err := conf.Certificate.AppendCertificates(epConfiguration, ep); err != nil { - log.Errorf("Unable to append certificate %s to entrypoint %s: %v", conf.Certificate.getTruncatedCertificateName(), ep, err) - } - } - } -} diff --git a/old/types/dns_resolvers.go b/old/types/dns_resolvers.go deleted file mode 100644 index dd96f7895..000000000 --- a/old/types/dns_resolvers.go +++ /dev/null @@ -1,44 +0,0 @@ -package types - -import ( - "fmt" - "strings" -) - -// DNSResolvers is a list of DNSes that we will try to resolve the challenged FQDN against -type DNSResolvers []string - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (r *DNSResolvers) String() string { - return strings.Join(*r, ",") -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. 
-func (r *DNSResolvers) Set(value string) error { - entryPoints := strings.Split(value, ",") - if len(entryPoints) == 0 { - return fmt.Errorf("wrong DNSResolvers format: %s", value) - } - for _, entryPoint := range entryPoints { - *r = append(*r, entryPoint) - } - return nil -} - -// Get return the DNSResolvers list -func (r *DNSResolvers) Get() interface{} { - return *r -} - -// SetValue sets the DNSResolvers list -func (r *DNSResolvers) SetValue(val interface{}) { - *r = val.(DNSResolvers) -} - -// Type is type of the struct -func (r *DNSResolvers) Type() string { - return "dnsresolvers" -} diff --git a/old/types/domain_test.go b/old/types/domain_test.go deleted file mode 100644 index dc97c7971..000000000 --- a/old/types/domain_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDomain_ToStrArray(t *testing.T) { - testCases := []struct { - desc string - domain Domain - expected []string - }{ - { - desc: "with Main and SANs", - domain: Domain{ - Main: "foo.com", - SANs: []string{"bar.foo.com", "bir.foo.com"}, - }, - expected: []string{"foo.com", "bar.foo.com", "bir.foo.com"}, - }, - { - desc: "without SANs", - domain: Domain{ - Main: "foo.com", - }, - expected: []string{"foo.com"}, - }, - { - desc: "without Main", - domain: Domain{ - SANs: []string{"bar.foo.com", "bir.foo.com"}, - }, - expected: []string{"bar.foo.com", "bir.foo.com"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - domains := test.domain.ToStrArray() - assert.EqualValues(t, test.expected, domains) - }) - } -} - -func TestDomain_Set(t *testing.T) { - testCases := []struct { - desc string - rawDomains []string - expected Domain - }{ - { - desc: "with 3 domains", - rawDomains: []string{"foo.com", "bar.foo.com", "bir.foo.com"}, - expected: Domain{ - Main: "foo.com", - SANs: []string{"bar.foo.com", "bir.foo.com"}, - }, - }, - { - desc: "with 1 domain", - rawDomains: []string{"foo.com"}, - expected: Domain{ - Main: "foo.com", - SANs: []string{}, - }, - }, - { - desc: "", - rawDomains: nil, - expected: Domain{}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - domain := Domain{} - domain.Set(test.rawDomains) - - assert.Equal(t, test.expected, domain) - }) - } -} - -func TestMatchDomain(t *testing.T) { - testCases := []struct { - desc string - certDomain string - domain string - expected bool - }{ - { - desc: "exact match", - certDomain: "traefik.wtf", - domain: "traefik.wtf", - expected: true, - }, - { - desc: "wildcard and root domain", - certDomain: "*.traefik.wtf", - domain: "traefik.wtf", - expected: false, - }, - { - desc: "wildcard and sub domain", - certDomain: "*.traefik.wtf", - domain: "sub.traefik.wtf", - expected: true, - }, - { - desc: "wildcard and sub sub domain", - certDomain: "*.traefik.wtf", - domain: "sub.sub.traefik.wtf", - expected: false, - }, - { - desc: "double wildcard and sub sub domain", - certDomain: "*.*.traefik.wtf", - domain: "sub.sub.traefik.wtf", - expected: true, - }, - { - desc: "sub sub domain and invalid wildcard", - certDomain: "sub.*.traefik.wtf", - domain: "sub.sub.traefik.wtf", - expected: false, - }, - { - desc: "sub sub domain and valid wildcard", - certDomain: "*.sub.traefik.wtf", - domain: "sub.sub.traefik.wtf", - expected: true, - }, - { - desc: "dot replaced by a cahr", - certDomain: "sub.sub.traefik.wtf", - domain: "sub.sub.traefikiwtf", - expected: false, - }, 
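The wildcard expectations in the cases around here come down to one rule, visible in MatchDomain: candidates are built by replacing the leading labels of the request domain with "*", one label at a time, and compared against the certificate domain with trailing dots stripped. A compact sketch of that matching using the same inputs.

package main

import (
	"fmt"
	"strings"
)

// matchDomain mirrors the wildcard logic exercised by these test cases.
func matchDomain(domain, certDomain string) bool {
	if domain == certDomain {
		return true
	}
	certDomain = strings.TrimRight(certDomain, ".")

	labels := strings.Split(domain, ".")
	for i := range labels {
		labels[i] = "*"
		if certDomain == strings.Join(labels, ".") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(matchDomain("sub.traefik.wtf", "*.traefik.wtf"))       // true
	fmt.Println(matchDomain("traefik.wtf", "*.traefik.wtf"))           // false
	fmt.Println(matchDomain("sub.sub.traefik.wtf", "*.traefik.wtf"))   // false
	fmt.Println(matchDomain("sub.sub.traefik.wtf", "*.*.traefik.wtf")) // true
}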
- { - desc: "*", - certDomain: "*", - domain: "sub.sub.traefik.wtf", - expected: false, - }, - { - desc: "?", - certDomain: "?", - domain: "sub.sub.traefik.wtf", - expected: false, - }, - { - desc: "...................", - certDomain: "...................", - domain: "sub.sub.traefik.wtf", - expected: false, - }, - { - desc: "wildcard and *", - certDomain: "*.traefik.wtf", - domain: "*.*.traefik.wtf", - expected: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - domains := MatchDomain(test.domain, test.certDomain) - assert.Equal(t, test.expected, domains) - }) - } -} diff --git a/old/types/domains.go b/old/types/domains.go deleted file mode 100644 index 2cace3f64..000000000 --- a/old/types/domains.go +++ /dev/null @@ -1,88 +0,0 @@ -package types - -import ( - "fmt" - "strings" -) - -// Domain holds a domain name with SANs -type Domain struct { - Main string - SANs []string -} - -// ToStrArray convert a domain into an array of strings -func (d *Domain) ToStrArray() []string { - var domains []string - if len(d.Main) > 0 { - domains = []string{d.Main} - } - return append(domains, d.SANs...) -} - -// Set sets a domains from an array of strings -func (d *Domain) Set(domains []string) { - if len(domains) > 0 { - d.Main = domains[0] - d.SANs = domains[1:] - } -} - -// Domains parse []Domain -type Domains []Domain - -// Set []Domain -func (ds *Domains) Set(str string) error { - fargs := func(c rune) bool { - return c == ',' || c == ';' - } - - // get function - slice := strings.FieldsFunc(str, fargs) - if len(slice) < 1 { - return fmt.Errorf("parse error ACME.Domain. Unable to parse %s", str) - } - - d := Domain{ - Main: slice[0], - } - - if len(slice) > 1 { - d.SANs = slice[1:] - } - - *ds = append(*ds, d) - return nil -} - -// Get []Domain -func (ds *Domains) Get() interface{} { return []Domain(*ds) } - -// String returns []Domain in string -func (ds *Domains) String() string { return fmt.Sprintf("%+v", *ds) } - -// SetValue sets []Domain into the parser -func (ds *Domains) SetValue(val interface{}) { - *ds = val.([]Domain) -} - -// MatchDomain return true if a domain match the cert domain -func MatchDomain(domain string, certDomain string) bool { - if domain == certDomain { - return true - } - - for len(certDomain) > 0 && certDomain[len(certDomain)-1] == '.' { - certDomain = certDomain[:len(certDomain)-1] - } - - labels := strings.Split(domain, ".") - for i := range labels { - labels[i] = "*" - candidate := strings.Join(labels, ".") - if certDomain == candidate { - return true - } - } - return false -} diff --git a/old/types/internal_router.go b/old/types/internal_router.go deleted file mode 100644 index 62bc1f51c..000000000 --- a/old/types/internal_router.go +++ /dev/null @@ -1,10 +0,0 @@ -package types - -import ( - "github.com/containous/mux" -) - -// InternalRouter router used by server to register internal routes (/api, /ping ...) 
-type InternalRouter interface { - AddRoutes(systemRouter *mux.Router) -} diff --git a/old/types/logs.go b/old/types/logs.go deleted file mode 100644 index 118de9c5f..000000000 --- a/old/types/logs.go +++ /dev/null @@ -1,200 +0,0 @@ -package types - -import ( - "fmt" - "strings" - - "github.com/containous/flaeg/parse" -) - -const ( - // AccessLogKeep is the keep string value - AccessLogKeep = "keep" - // AccessLogDrop is the drop string value - AccessLogDrop = "drop" - // AccessLogRedact is the redact string value - AccessLogRedact = "redact" -) - -// TraefikLog holds the configuration settings for the traefik logger. -type TraefikLog struct { - FilePath string `json:"file,omitempty" description:"Traefik log file path. Stdout is used when omitted or empty"` - Format string `json:"format,omitempty" description:"Traefik log format: json | common"` -} - -// AccessLog holds the configuration settings for the access logger (middlewares/accesslog). -type AccessLog struct { - FilePath string `json:"file,omitempty" description:"Access log file path. Stdout is used when omitted or empty" export:"true"` - Format string `json:"format,omitempty" description:"Access log format: json | common" export:"true"` - Filters *AccessLogFilters `json:"filters,omitempty" description:"Access log filters, used to keep only specific access logs" export:"true"` - Fields *AccessLogFields `json:"fields,omitempty" description:"AccessLogFields" export:"true"` - BufferingSize int64 `json:"bufferingSize,omitempty" description:"Number of access log lines to process in a buffered way. Default 0." export:"true"` -} - -// AccessLogFilters holds filters configuration -type AccessLogFilters struct { - StatusCodes StatusCodes `json:"statusCodes,omitempty" description:"Keep access logs with status codes in the specified range" export:"true"` - RetryAttempts bool `json:"retryAttempts,omitempty" description:"Keep access logs when at least one retry happened" export:"true"` - MinDuration parse.Duration `json:"duration,omitempty" description:"Keep access logs when request took longer than the specified duration" export:"true"` -} - -// FieldHeaders holds configuration for access log headers -type FieldHeaders struct { - DefaultMode string `json:"defaultMode,omitempty" description:"Default mode for fields: keep | drop | redact" export:"true"` - Names FieldHeaderNames `json:"names,omitempty" description:"Override mode for headers" export:"true"` -} - -// StatusCodes holds status codes ranges to filter access log -type StatusCodes []string - -// Set adds strings elem into the the parser -// it splits str on , and ; -func (s *StatusCodes) Set(str string) error { - fargs := func(c rune) bool { - return c == ',' || c == ';' - } - // get function - slice := strings.FieldsFunc(str, fargs) - *s = append(*s, slice...) - return nil -} - -// Get StatusCodes -func (s *StatusCodes) Get() interface{} { return *s } - -// String return slice in a string -func (s *StatusCodes) String() string { return fmt.Sprintf("%v", *s) } - -// SetValue sets StatusCodes into the parser -func (s *StatusCodes) SetValue(val interface{}) { - *s = val.(StatusCodes) -} - -// FieldNames holds maps of fields with specific mode -type FieldNames map[string]string - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. 
-func (f *FieldNames) String() string { - return fmt.Sprintf("%+v", *f) -} - -// Get return the FieldNames map -func (f *FieldNames) Get() interface{} { - return *f -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a space-separated list, so we split it. -func (f *FieldNames) Set(value string) error { - // When arguments are passed through YAML, escaped double quotes - // might be added to this string, and they would break the last - // key/value pair. This ensures the string is clean. - value = strings.Trim(value, "\"") - - fields := strings.Fields(value) - - for _, field := range fields { - n := strings.SplitN(field, "=", 2) - if len(n) == 2 { - (*f)[n[0]] = n[1] - } - } - - return nil -} - -// SetValue sets the FieldNames map with val -func (f *FieldNames) SetValue(val interface{}) { - *f = val.(FieldNames) -} - -// FieldHeaderNames holds maps of fields with specific mode -type FieldHeaderNames map[string]string - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (f *FieldHeaderNames) String() string { - return fmt.Sprintf("%+v", *f) -} - -// Get return the FieldHeaderNames map -func (f *FieldHeaderNames) Get() interface{} { - return *f -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a space-separated list, so we split it. -func (f *FieldHeaderNames) Set(value string) error { - // When arguments are passed through YAML, escaped double quotes - // might be added to this string, and they would break the last - // key/value pair. This ensures the string is clean. 
- value = strings.Trim(value, "\"") - - fields := strings.Fields(value) - - for _, field := range fields { - n := strings.SplitN(field, "=", 2) - (*f)[n[0]] = n[1] - } - - return nil -} - -// SetValue sets the FieldHeaderNames map with val -func (f *FieldHeaderNames) SetValue(val interface{}) { - *f = val.(FieldHeaderNames) -} - -// AccessLogFields holds configuration for access log fields -type AccessLogFields struct { - DefaultMode string `json:"defaultMode,omitempty" description:"Default mode for fields: keep | drop" export:"true"` - Names FieldNames `json:"names,omitempty" description:"Override mode for fields" export:"true"` - Headers *FieldHeaders `json:"headers,omitempty" description:"Headers to keep, drop or redact" export:"true"` -} - -// Keep check if the field need to be kept or dropped -func (f *AccessLogFields) Keep(field string) bool { - defaultKeep := true - if f != nil { - defaultKeep = checkFieldValue(f.DefaultMode, defaultKeep) - - if v, ok := f.Names[field]; ok { - return checkFieldValue(v, defaultKeep) - } - } - return defaultKeep -} - -// KeepHeader checks if the headers need to be kept, dropped or redacted and returns the status -func (f *AccessLogFields) KeepHeader(header string) string { - defaultValue := AccessLogKeep - if f != nil && f.Headers != nil { - defaultValue = checkFieldHeaderValue(f.Headers.DefaultMode, defaultValue) - - if v, ok := f.Headers.Names[header]; ok { - return checkFieldHeaderValue(v, defaultValue) - } - } - return defaultValue -} - -func checkFieldValue(value string, defaultKeep bool) bool { - switch value { - case AccessLogKeep: - return true - case AccessLogDrop: - return false - default: - return defaultKeep - } -} - -func checkFieldHeaderValue(value string, defaultValue string) string { - if value == AccessLogKeep || value == AccessLogDrop || value == AccessLogRedact { - return value - } - return defaultValue -} diff --git a/old/types/logs_test.go b/old/types/logs_test.go deleted file mode 100644 index 0b1bf8ebc..000000000 --- a/old/types/logs_test.go +++ /dev/null @@ -1,419 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStatusCodesSet(t *testing.T) { - testCases := []struct { - desc string - value string - expected StatusCodes - }{ - { - desc: "One value should return StatusCodes of size 1", - value: "200", - expected: StatusCodes{"200"}, - }, - { - desc: "Two values separated by comma should return StatusCodes of size 2", - value: "200,400", - expected: StatusCodes{"200", "400"}, - }, - { - desc: "Two values separated by semicolon should return StatusCodes of size 2", - value: "200;400", - expected: StatusCodes{"200", "400"}, - }, - { - desc: "Three values separated by comma and semicolon should return StatusCodes of size 3", - value: "200,400;500", - expected: StatusCodes{"200", "400", "500"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var statusCodes StatusCodes - err := statusCodes.Set(test.value) - assert.Nil(t, err) - assert.Equal(t, test.expected, statusCodes) - }) - } -} - -func TestStatusCodesGet(t *testing.T) { - testCases := []struct { - desc string - values StatusCodes - expected StatusCodes - }{ - { - desc: "Should return 1 value", - values: StatusCodes{"200"}, - expected: StatusCodes{"200"}, - }, - { - desc: "Should return 2 values", - values: StatusCodes{"200", "400"}, - expected: StatusCodes{"200", "400"}, - }, - { - desc: "Should return 3 values", - values: StatusCodes{"200", "400", "500"}, 
- expected: StatusCodes{"200", "400", "500"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.Get() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestStatusCodesString(t *testing.T) { - testCases := []struct { - desc string - values StatusCodes - expected string - }{ - { - desc: "Should return 1 value", - values: StatusCodes{"200"}, - expected: "[200]", - }, - { - desc: "Should return 2 values", - values: StatusCodes{"200", "400"}, - expected: "[200 400]", - }, - { - desc: "Should return 3 values", - values: StatusCodes{"200", "400", "500"}, - expected: "[200 400 500]", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.String() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestStatusCodesSetValue(t *testing.T) { - testCases := []struct { - desc string - values StatusCodes - expected StatusCodes - }{ - { - desc: "Should return 1 value", - values: StatusCodes{"200"}, - expected: StatusCodes{"200"}, - }, - { - desc: "Should return 2 values", - values: StatusCodes{"200", "400"}, - expected: StatusCodes{"200", "400"}, - }, - { - desc: "Should return 3 values", - values: StatusCodes{"200", "400", "500"}, - expected: StatusCodes{"200", "400", "500"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - var slice StatusCodes - slice.SetValue(test.values) - assert.Equal(t, test.expected, slice) - }) - } -} - -func TestFieldsNamesSet(t *testing.T) { - testCases := []struct { - desc string - value string - expected *FieldNames - }{ - { - desc: "One value should return FieldNames of size 1", - value: "field-1=foo", - expected: &FieldNames{ - "field-1": "foo", - }, - }, - { - desc: "Two values separated by space should return FieldNames of size 2", - value: "field-1=foo field-2=bar", - expected: &FieldNames{ - "field-1": "foo", - "field-2": "bar", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - fieldsNames := &FieldNames{} - err := fieldsNames.Set(test.value) - assert.NoError(t, err) - - assert.Equal(t, test.expected, fieldsNames) - }) - } -} - -func TestFieldsNamesGet(t *testing.T) { - testCases := []struct { - desc string - values FieldNames - expected FieldNames - }{ - { - desc: "Should return 1 value", - values: FieldNames{"field-1": "foo"}, - expected: FieldNames{"field-1": "foo"}, - }, - { - desc: "Should return 2 values", - values: FieldNames{"field-1": "foo", "field-2": "bar"}, - expected: FieldNames{"field-1": "foo", "field-2": "bar"}, - }, - { - desc: "Should return 3 values", - values: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, - expected: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.Get() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestFieldsNamesString(t *testing.T) { - testCases := []struct { - desc string - values FieldNames - expected string - }{ - { - desc: "Should return 1 value", - values: FieldNames{"field-1": "foo"}, - expected: "map[field-1:foo]", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.String() - assert.Equal(t, 
test.expected, actual) - }) - } -} - -func TestFieldsNamesSetValue(t *testing.T) { - testCases := []struct { - desc string - values FieldNames - expected *FieldNames - }{ - { - desc: "Should return 1 value", - values: FieldNames{"field-1": "foo"}, - expected: &FieldNames{"field-1": "foo"}, - }, - { - desc: "Should return 2 values", - values: FieldNames{"field-1": "foo", "field-2": "bar"}, - expected: &FieldNames{"field-1": "foo", "field-2": "bar"}, - }, - { - desc: "Should return 3 values", - values: FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, - expected: &FieldNames{"field-1": "foo", "field-2": "bar", "field-3": "powpow"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - fieldsNames := &FieldNames{} - fieldsNames.SetValue(test.values) - assert.Equal(t, test.expected, fieldsNames) - }) - } -} - -func TestFieldsHeadersNamesSet(t *testing.T) { - testCases := []struct { - desc string - value string - expected *FieldHeaderNames - }{ - { - desc: "One value should return FieldNames of size 1", - value: "X-HEADER-1=foo", - expected: &FieldHeaderNames{ - "X-HEADER-1": "foo", - }, - }, - { - desc: "Two values separated by space should return FieldNames of size 2", - value: "X-HEADER-1=foo X-HEADER-2=bar", - expected: &FieldHeaderNames{ - "X-HEADER-1": "foo", - "X-HEADER-2": "bar", - }, - }, - { - desc: "Two values separated by space with escaped double quotes should return FieldNames of size 2", - value: "\"X-HEADER-1=foo X-HEADER-2=bar\"", - expected: &FieldHeaderNames{ - "X-HEADER-1": "foo", - "X-HEADER-2": "bar", - }, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - headersNames := &FieldHeaderNames{} - err := headersNames.Set(test.value) - assert.NoError(t, err) - - assert.Equal(t, test.expected, headersNames) - }) - } -} - -func TestFieldsHeadersNamesGet(t *testing.T) { - testCases := []struct { - desc string - values FieldHeaderNames - expected FieldHeaderNames - }{ - { - desc: "Should return 1 value", - values: FieldHeaderNames{"X-HEADER-1": "foo"}, - expected: FieldHeaderNames{"X-HEADER-1": "foo"}, - }, - { - desc: "Should return 2 values", - values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, - expected: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, - }, - { - desc: "Should return 3 values", - values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, - expected: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.Get() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestFieldsHeadersNamesString(t *testing.T) { - testCases := []struct { - desc string - values FieldHeaderNames - expected string - }{ - { - desc: "Should return 1 value", - values: FieldHeaderNames{"X-HEADER-1": "foo"}, - expected: "map[X-HEADER-1:foo]", - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual := test.values.String() - assert.Equal(t, test.expected, actual) - }) - } -} - -func TestFieldsHeadersNamesSetValue(t *testing.T) { - testCases := []struct { - desc string - values FieldHeaderNames - expected *FieldHeaderNames - }{ - { - desc: "Should return 1 value", - values: FieldHeaderNames{"X-HEADER-1": "foo"}, - expected: 
&FieldHeaderNames{"X-HEADER-1": "foo"}, - }, - { - desc: "Should return 2 values", - values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, - expected: &FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar"}, - }, - { - desc: "Should return 3 values", - values: FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, - expected: &FieldHeaderNames{"X-HEADER-1": "foo", "X-HEADER-2": "bar", "X-HEADER-3": "powpow"}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - headersNames := &FieldHeaderNames{} - headersNames.SetValue(test.values) - assert.Equal(t, test.expected, headersNames) - }) - } -} diff --git a/old/types/types.go b/old/types/types.go deleted file mode 100644 index c4c107b09..000000000 --- a/old/types/types.go +++ /dev/null @@ -1,685 +0,0 @@ -package types - -import ( - "crypto/tls" - "crypto/x509" - "encoding" - "errors" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/abronan/valkeyrie/store" - "github.com/containous/flaeg/parse" - "github.com/containous/mux" - "github.com/containous/traefik/old/log" - "github.com/containous/traefik/pkg/ip" - traefiktls "github.com/containous/traefik/pkg/tls" - "github.com/mitchellh/hashstructure" - "github.com/ryanuber/go-glob" -) - -// Backend holds backend configuration. -type Backend struct { - Servers map[string]Server `json:"servers,omitempty"` - CircuitBreaker *CircuitBreaker `json:"circuitBreaker,omitempty"` - LoadBalancer *LoadBalancer `json:"loadBalancer,omitempty"` - MaxConn *MaxConn `json:"maxConn,omitempty"` - HealthCheck *HealthCheck `json:"healthCheck,omitempty"` - Buffering *Buffering `json:"buffering,omitempty"` - ResponseForwarding *ResponseForwarding `json:"forwardingResponse,omitempty"` -} - -// ResponseForwarding holds configuration for the forward of the response -type ResponseForwarding struct { - FlushInterval string `json:"flushInterval,omitempty"` -} - -// MaxConn holds maximum connection configuration -type MaxConn struct { - Amount int64 `json:"amount,omitempty"` - ExtractorFunc string `json:"extractorFunc,omitempty"` -} - -// LoadBalancer holds load balancing configuration. -type LoadBalancer struct { - Method string `json:"method,omitempty"` - Stickiness *Stickiness `json:"stickiness,omitempty"` -} - -// Stickiness holds sticky session configuration. -type Stickiness struct { - CookieName string `json:"cookieName,omitempty"` -} - -// CircuitBreaker holds circuit breaker configuration. -type CircuitBreaker struct { - Expression string `json:"expression,omitempty"` -} - -// Buffering holds request/response buffering configuration/ -type Buffering struct { - MaxRequestBodyBytes int64 `json:"maxRequestBodyBytes,omitempty"` - MemRequestBodyBytes int64 `json:"memRequestBodyBytes,omitempty"` - MaxResponseBodyBytes int64 `json:"maxResponseBodyBytes,omitempty"` - MemResponseBodyBytes int64 `json:"memResponseBodyBytes,omitempty"` - RetryExpression string `json:"retryExpression,omitempty"` -} - -// WhiteList contains white list configuration. 
-type WhiteList struct { - SourceRange []string `json:"sourceRange,omitempty"` - IPStrategy *IPStrategy `json:"ipStrategy,omitempty"` -} - -// HealthCheck holds HealthCheck configuration -type HealthCheck struct { - Scheme string `json:"scheme,omitempty"` - Path string `json:"path,omitempty"` - Port int `json:"port,omitempty"` - Interval string `json:"interval,omitempty"` - Timeout string `json:"timeout,omitempty"` - Hostname string `json:"hostname,omitempty"` - Headers map[string]string `json:"headers,omitempty"` -} - -// Server holds server configuration. -type Server struct { - URL string `json:"url,omitempty"` - Weight int `json:"weight"` -} - -// Route holds route configuration. -type Route struct { - Rule string `json:"rule,omitempty"` -} - -// ServerRoute holds ServerRoute configuration. -type ServerRoute struct { - Route *mux.Route - StripPrefixes []string - StripPrefixesRegex []string - AddPrefix string - ReplacePath string - ReplacePathRegex string -} - -// ErrorPage holds custom error page configuration -type ErrorPage struct { - Status []string `json:"status,omitempty"` - Backend string `json:"backend,omitempty"` - Query string `json:"query,omitempty"` -} - -// Rate holds a rate limiting configuration for a specific time period -type Rate struct { - Period parse.Duration `json:"period,omitempty"` - Average int64 `json:"average,omitempty"` - Burst int64 `json:"burst,omitempty"` -} - -// RateLimit holds a rate limiting configuration for a given frontend -type RateLimit struct { - RateSet map[string]*Rate `json:"rateset,omitempty"` - ExtractorFunc string `json:"extractorFunc,omitempty"` -} - -// Headers holds the custom header configuration -type Headers struct { - CustomRequestHeaders map[string]string `json:"customRequestHeaders,omitempty"` - CustomResponseHeaders map[string]string `json:"customResponseHeaders,omitempty"` - - AllowedHosts []string `json:"allowedHosts,omitempty"` - HostsProxyHeaders []string `json:"hostsProxyHeaders,omitempty"` - SSLRedirect bool `json:"sslRedirect,omitempty"` - SSLTemporaryRedirect bool `json:"sslTemporaryRedirect,omitempty"` - SSLHost string `json:"sslHost,omitempty"` - SSLProxyHeaders map[string]string `json:"sslProxyHeaders,omitempty"` - SSLForceHost bool `json:"sslForceHost,omitempty"` - STSSeconds int64 `json:"stsSeconds,omitempty"` - STSIncludeSubdomains bool `json:"stsIncludeSubdomains,omitempty"` - STSPreload bool `json:"stsPreload,omitempty"` - ForceSTSHeader bool `json:"forceSTSHeader,omitempty"` - FrameDeny bool `json:"frameDeny,omitempty"` - CustomFrameOptionsValue string `json:"customFrameOptionsValue,omitempty"` - ContentTypeNosniff bool `json:"contentTypeNosniff,omitempty"` - BrowserXSSFilter bool `json:"browserXssFilter,omitempty"` - CustomBrowserXSSValue string `json:"customBrowserXSSValue,omitempty"` - ContentSecurityPolicy string `json:"contentSecurityPolicy,omitempty"` - PublicKey string `json:"publicKey,omitempty"` - ReferrerPolicy string `json:"referrerPolicy,omitempty"` - IsDevelopment bool `json:"isDevelopment,omitempty"` -} - -// HasCustomHeadersDefined checks to see if any of the custom header elements have been set -func (h *Headers) HasCustomHeadersDefined() bool { - return h != nil && (len(h.CustomResponseHeaders) != 0 || - len(h.CustomRequestHeaders) != 0) -} - -// HasSecureHeadersDefined checks to see if any of the secure header elements have been set -func (h *Headers) HasSecureHeadersDefined() bool { - return h != nil && (len(h.AllowedHosts) != 0 || - len(h.HostsProxyHeaders) != 0 || - h.SSLRedirect || - 
h.SSLTemporaryRedirect || - h.SSLForceHost || - h.SSLHost != "" || - len(h.SSLProxyHeaders) != 0 || - h.STSSeconds != 0 || - h.STSIncludeSubdomains || - h.STSPreload || - h.ForceSTSHeader || - h.FrameDeny || - h.CustomFrameOptionsValue != "" || - h.ContentTypeNosniff || - h.BrowserXSSFilter || - h.CustomBrowserXSSValue != "" || - h.ContentSecurityPolicy != "" || - h.PublicKey != "" || - h.ReferrerPolicy != "" || - h.IsDevelopment) -} - -// Frontend holds frontend configuration. -type Frontend struct { - EntryPoints []string `json:"entryPoints,omitempty" hash:"ignore"` - Backend string `json:"backend,omitempty"` - Routes map[string]Route `json:"routes,omitempty" hash:"ignore"` - PassHostHeader bool `json:"passHostHeader,omitempty"` - PassTLSCert bool `json:"passTLSCert,omitempty"` // Deprecated use PassTLSClientCert instead - PassTLSClientCert *TLSClientHeaders `json:"passTLSClientCert,omitempty"` - Priority int `json:"priority"` - WhiteList *WhiteList `json:"whiteList,omitempty"` - Headers *Headers `json:"headers,omitempty"` - Errors map[string]*ErrorPage `json:"errors,omitempty"` - RateLimit *RateLimit `json:"ratelimit,omitempty"` - Redirect *Redirect `json:"redirect,omitempty"` - Auth *Auth `json:"auth,omitempty"` -} - -// Hash returns the hash value of a Frontend struct. -func (f *Frontend) Hash() (string, error) { - hash, err := hashstructure.Hash(f, nil) - - if err != nil { - return "", err - } - - return strconv.FormatUint(hash, 10), nil -} - -// Redirect configures a redirection of an entry point to another, or to an URL -type Redirect struct { - EntryPoint string `json:"entryPoint,omitempty"` - Regex string `json:"regex,omitempty"` - Replacement string `json:"replacement,omitempty"` - Permanent bool `json:"permanent,omitempty"` -} - -// LoadBalancerMethod holds the method of load balancing to use. -type LoadBalancerMethod uint8 - -const ( - // Wrr (default) = Weighted Round Robin - Wrr LoadBalancerMethod = iota - // Drr = Dynamic Round Robin - Drr -) - -var loadBalancerMethodNames = []string{ - "Wrr", - "Drr", -} - -// NewLoadBalancerMethod create a new LoadBalancerMethod from a given LoadBalancer. -func NewLoadBalancerMethod(loadBalancer *LoadBalancer) (LoadBalancerMethod, error) { - if loadBalancer == nil { - return Wrr, errors.New("no load-balancer defined, fallback to 'wrr' method") - } - - if len(loadBalancer.Method) == 0 { - return Wrr, errors.New("no load-balancing method defined, fallback to 'wrr' method") - } - - for i, name := range loadBalancerMethodNames { - if strings.EqualFold(name, loadBalancer.Method) { - return LoadBalancerMethod(i), nil - } - } - - return Wrr, fmt.Errorf("invalid load-balancing method %q, fallback to 'wrr' method", loadBalancer.Method) -} - -// Configurations is for currentConfigurations Map -type Configurations map[string]*Configuration - -// Configuration of a provider. -type Configuration struct { - Backends map[string]*Backend `json:"backends,omitempty"` - Frontends map[string]*Frontend `json:"frontends,omitempty"` - TLS []*traefiktls.Configuration `json:"-"` -} - -// ConfigMessage hold configuration information exchanged between parts of traefik. 
-type ConfigMessage struct { - ProviderName string - Configuration *Configuration -} - -// Constraint hold a parsed constraint expression -type Constraint struct { - Key string `export:"true"` - // MustMatch is true if operator is "==" or false if operator is "!=" - MustMatch bool `export:"true"` - // TODO: support regex - Regex string `export:"true"` -} - -// NewConstraint receive a string and return a *Constraint, after checking syntax and parsing the constraint expression -func NewConstraint(exp string) (*Constraint, error) { - sep := "" - constraint := &Constraint{} - - if strings.Contains(exp, "==") { - sep = "==" - constraint.MustMatch = true - } else if strings.Contains(exp, "!=") { - sep = "!=" - constraint.MustMatch = false - } else { - return nil, errors.New("constraint expression missing valid operator: '==' or '!='") - } - - kv := strings.SplitN(exp, sep, 2) - if len(kv) == 2 { - // At the moment, it only supports tags - if kv[0] != "tag" { - return nil, errors.New("constraint must be tag-based. Syntax: tag==us-*") - } - - constraint.Key = kv[0] - constraint.Regex = kv[1] - return constraint, nil - } - - return nil, fmt.Errorf("incorrect constraint expression: %s", exp) -} - -func (c *Constraint) String() string { - if c.MustMatch { - return c.Key + "==" + c.Regex - } - return c.Key + "!=" + c.Regex -} - -var _ encoding.TextUnmarshaler = (*Constraint)(nil) - -// UnmarshalText define how unmarshal in TOML parsing -func (c *Constraint) UnmarshalText(text []byte) error { - constraint, err := NewConstraint(string(text)) - if err != nil { - return err - } - c.Key = constraint.Key - c.MustMatch = constraint.MustMatch - c.Regex = constraint.Regex - return nil -} - -var _ encoding.TextMarshaler = (*Constraint)(nil) - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the result. 
-func (c *Constraint) MarshalText() (text []byte, err error) { - return []byte(c.String()), nil -} - -// MatchConstraintWithAtLeastOneTag tests a constraint for one single service -func (c *Constraint) MatchConstraintWithAtLeastOneTag(tags []string) bool { - for _, tag := range tags { - if glob.Glob(c.Regex, tag) { - return true - } - } - return false -} - -// Set []*Constraint -func (cs *Constraints) Set(str string) error { - exps := strings.Split(str, ",") - if len(exps) == 0 { - return fmt.Errorf("bad Constraint format: %s", str) - } - for _, exp := range exps { - constraint, err := NewConstraint(exp) - if err != nil { - return err - } - *cs = append(*cs, constraint) - } - return nil -} - -// Constraints holds a Constraint parser -type Constraints []*Constraint - -// Get []*Constraint -func (cs *Constraints) Get() interface{} { return []*Constraint(*cs) } - -// String returns []*Constraint in string -func (cs *Constraints) String() string { return fmt.Sprintf("%+v", *cs) } - -// SetValue sets []*Constraint into the parser -func (cs *Constraints) SetValue(val interface{}) { - *cs = val.(Constraints) -} - -// Type exports the Constraints type as a string -func (cs *Constraints) Type() string { - return "constraint" -} - -// Store holds KV store cluster config -type Store struct { - store.Store - // like this "prefix" (without the /) - Prefix string `export:"true"` -} - -// Cluster holds cluster config -type Cluster struct { - Node string `description:"Node name" export:"true"` - Store *Store `export:"true"` -} - -// Auth holds authentication configuration (BASIC, DIGEST, users) -type Auth struct { - Basic *Basic `json:"basic,omitempty" export:"true"` - Digest *Digest `json:"digest,omitempty" export:"true"` - Forward *Forward `json:"forward,omitempty" export:"true"` - HeaderField string `json:"headerField,omitempty" export:"true"` -} - -// Users authentication users -type Users []string - -// Basic HTTP basic authentication -type Basic struct { - Realm string `json:"realm,omitempty"` - Users `json:"users,omitempty" mapstructure:","` - UsersFile string `json:"usersFile,omitempty"` - RemoveHeader bool `json:"removeHeader,omitempty"` -} - -// Digest HTTP authentication -type Digest struct { - Users `json:"users,omitempty" mapstructure:","` - UsersFile string `json:"usersFile,omitempty"` - RemoveHeader bool `json:"removeHeader,omitempty"` -} - -// Forward authentication -type Forward struct { - Address string `description:"Authentication server address" json:"address,omitempty"` - TLS *ClientTLS `description:"Enable TLS support" json:"tls,omitempty" export:"true"` - TrustForwardHeader bool `description:"Trust X-Forwarded-* headers" json:"trustForwardHeader,omitempty" export:"true"` - AuthResponseHeaders []string `description:"Headers to be forwarded from auth response" json:"authResponseHeaders,omitempty"` -} - -// CanonicalDomain returns a lower case domain with trim space -func CanonicalDomain(domain string) string { - return strings.ToLower(strings.TrimSpace(domain)) -} - -// Statistics provides options for monitoring request and response stats -type Statistics struct { - RecentErrors int `description:"Number of recent errors logged" export:"true"` -} - -// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems -type Metrics struct { - Prometheus *Prometheus `description:"Prometheus metrics exporter type" export:"true"` - Datadog *Datadog `description:"DataDog metrics exporter type" export:"true"` - StatsD *Statsd `description:"StatsD metrics 
exporter type" export:"true"` - InfluxDB *InfluxDB `description:"InfluxDB metrics exporter type"` -} - -// Prometheus can contain specific configuration used by the Prometheus Metrics exporter -type Prometheus struct { - Buckets Buckets `description:"Buckets for latency metrics" export:"true"` - EntryPoint string `description:"EntryPoint" export:"true"` -} - -// Datadog contains address and metrics pushing interval configuration -type Datadog struct { - Address string `description:"DataDog's address"` - PushInterval string `description:"DataDog push interval" export:"true"` -} - -// Statsd contains address and metrics pushing interval configuration -type Statsd struct { - Address string `description:"StatsD address"` - PushInterval string `description:"StatsD push interval" export:"true"` -} - -// InfluxDB contains address, login and metrics pushing interval configuration -type InfluxDB struct { - Address string `description:"InfluxDB address"` - Protocol string `description:"InfluxDB address protocol (udp or http)"` - PushInterval string `description:"InfluxDB push interval" export:"true"` - Database string `description:"InfluxDB database used when protocol is http" export:"true"` - RetentionPolicy string `description:"InfluxDB retention policy used when protocol is http" export:"true"` - Username string `description:"InfluxDB username (only with http)" export:"true"` - Password string `description:"InfluxDB password (only with http)" export:"true"` -} - -// Buckets holds Prometheus Buckets -type Buckets []float64 - -// Set adds strings elem into the the parser -// it splits str on "," and ";" and apply ParseFloat to string -func (b *Buckets) Set(str string) error { - fargs := func(c rune) bool { - return c == ',' || c == ';' - } - // get function - slice := strings.FieldsFunc(str, fargs) - for _, bucket := range slice { - bu, err := strconv.ParseFloat(bucket, 64) - if err != nil { - return err - } - *b = append(*b, bu) - } - return nil -} - -// Get []float64 -func (b *Buckets) Get() interface{} { return *b } - -// String return slice in a string -func (b *Buckets) String() string { return fmt.Sprintf("%v", *b) } - -// SetValue sets []float64 into the parser -func (b *Buckets) SetValue(val interface{}) { - *b = val.(Buckets) -} - -// ClientTLS holds TLS specific configurations as client -// CA, Cert and Key can be either path or file contents -type ClientTLS struct { - CA string `description:"TLS CA" json:"ca,omitempty"` - CAOptional bool `description:"TLS CA.Optional" json:"caOptional,omitempty"` - Cert string `description:"TLS cert" json:"cert,omitempty"` - Key string `description:"TLS key" json:"key,omitempty"` - InsecureSkipVerify bool `description:"TLS insecure skip verify" json:"insecureSkipVerify,omitempty"` -} - -// CreateTLSConfig creates a TLS config from ClientTLS structures -func (clientTLS *ClientTLS) CreateTLSConfig() (*tls.Config, error) { - var err error - if clientTLS == nil { - log.Warnf("clientTLS is nil") - return nil, nil - } - caPool := x509.NewCertPool() - clientAuth := tls.NoClientCert - if clientTLS.CA != "" { - var ca []byte - if _, errCA := os.Stat(clientTLS.CA); errCA == nil { - ca, err = ioutil.ReadFile(clientTLS.CA) - if err != nil { - return nil, fmt.Errorf("failed to read CA. 
%s", err) - } - } else { - ca = []byte(clientTLS.CA) - } - if !caPool.AppendCertsFromPEM(ca) { - return nil, fmt.Errorf("failed to parse CA") - } - if clientTLS.CAOptional { - clientAuth = tls.VerifyClientCertIfGiven - } else { - clientAuth = tls.RequireAndVerifyClientCert - } - } - - cert := tls.Certificate{} - _, errKeyIsFile := os.Stat(clientTLS.Key) - - if !clientTLS.InsecureSkipVerify && (len(clientTLS.Cert) == 0 || len(clientTLS.Key) == 0) { - return nil, fmt.Errorf("TLS Certificate or Key file must be set when TLS configuration is created") - } - - if len(clientTLS.Cert) > 0 && len(clientTLS.Key) > 0 { - if _, errCertIsFile := os.Stat(clientTLS.Cert); errCertIsFile == nil { - if errKeyIsFile == nil { - cert, err = tls.LoadX509KeyPair(clientTLS.Cert, clientTLS.Key) - if err != nil { - return nil, fmt.Errorf("failed to load TLS keypair: %v", err) - } - } else { - return nil, fmt.Errorf("tls cert is a file, but tls key is not") - } - } else { - if errKeyIsFile != nil { - cert, err = tls.X509KeyPair([]byte(clientTLS.Cert), []byte(clientTLS.Key)) - if err != nil { - return nil, fmt.Errorf("failed to load TLS keypair: %v", err) - - } - } else { - return nil, fmt.Errorf("TLS key is a file, but tls cert is not") - } - } - } - - TLSConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caPool, - InsecureSkipVerify: clientTLS.InsecureSkipVerify, - ClientAuth: clientAuth, - } - return TLSConfig, nil -} - -// HTTPCodeRanges holds HTTP code ranges -type HTTPCodeRanges [][2]int - -// NewHTTPCodeRanges creates HTTPCodeRanges from a given []string. -// Break out the http status code ranges into a low int and high int -// for ease of use at runtime -func NewHTTPCodeRanges(strBlocks []string) (HTTPCodeRanges, error) { - var blocks HTTPCodeRanges - for _, block := range strBlocks { - codes := strings.Split(block, "-") - // if only a single HTTP code was configured, assume the best and create the correct configuration on the user's behalf - if len(codes) == 1 { - codes = append(codes, codes[0]) - } - lowCode, err := strconv.Atoi(codes[0]) - if err != nil { - return nil, err - } - highCode, err := strconv.Atoi(codes[1]) - if err != nil { - return nil, err - } - blocks = append(blocks, [2]int{lowCode, highCode}) - } - return blocks, nil -} - -// Contains tests whether the passed status code is within -// one of its HTTP code ranges. -func (h HTTPCodeRanges) Contains(statusCode int) bool { - for _, block := range h { - if statusCode >= block[0] && statusCode <= block[1] { - return true - } - } - return false -} - -// IPStrategy Configuration to choose the IP selection strategy. -type IPStrategy struct { - Depth int `json:"depth,omitempty" export:"true"` - ExcludedIPs []string `json:"excludedIPs,omitempty"` -} - -// Get an IP selection strategy -// if nil return the RemoteAddr strategy -// else return a strategy base on the configuration using the X-Forwarded-For Header. -// Depth override the ExcludedIPs -func (s *IPStrategy) Get() (ip.Strategy, error) { - if s == nil { - return &ip.RemoteAddrStrategy{}, nil - } - - if s.Depth > 0 { - return &ip.DepthStrategy{ - Depth: s.Depth, - }, nil - } - - if len(s.ExcludedIPs) > 0 { - checker, err := ip.NewChecker(s.ExcludedIPs) - if err != nil { - return nil, err - } - return &ip.CheckerStrategy{ - Checker: checker, - }, nil - } - - return &ip.RemoteAddrStrategy{}, nil -} - -// TLSClientHeaders holds the TLS client cert headers configuration. 
-type TLSClientHeaders struct { - PEM bool `description:"Enable header with escaped client pem" json:"pem"` - Infos *TLSClientCertificateInfos `description:"Enable header with configured client cert infos" json:"infos,omitempty"` -} - -// TLSClientCertificateInfos holds the client TLS certificate infos configuration -type TLSClientCertificateInfos struct { - NotAfter bool `description:"Add NotAfter info in header" json:"notAfter"` - NotBefore bool `description:"Add NotBefore info in header" json:"notBefore"` - Sans bool `description:"Add Sans info in header" json:"sans"` - Subject *TLSCLientCertificateDNInfos `description:"Add Subject info in header" json:"subject,omitempty"` - Issuer *TLSCLientCertificateDNInfos `description:"Add Issuer info in header" json:"issuer,omitempty"` -} - -// TLSCLientCertificateDNInfos holds the client TLS certificate distinguished name infos configuration -// cf https://tools.ietf.org/html/rfc3739 -type TLSCLientCertificateDNInfos struct { - Country bool `description:"Add Country info in header" json:"country"` - Province bool `description:"Add Province info in header" json:"province"` - Locality bool `description:"Add Locality info in header" json:"locality"` - Organization bool `description:"Add Organization info in header" json:"organization"` - CommonName bool `description:"Add CommonName info in header" json:"commonName"` - SerialNumber bool `description:"Add SerialNumber info in header" json:"serialNumber"` - DomainComponent bool `description:"Add Domain Component info in header" json:"domainComponent"` -} diff --git a/old/types/types_test.go b/old/types/types_test.go deleted file mode 100644 index b4931ef9b..000000000 --- a/old/types/types_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package types - -import ( - "fmt" - "testing" - - "github.com/containous/traefik/pkg/ip" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestHeaders_ShouldReturnFalseWhenNotHasCustomHeadersDefined(t *testing.T) { - headers := Headers{} - - assert.False(t, headers.HasCustomHeadersDefined()) -} - -func TestHeaders_ShouldReturnTrueWhenHasCustomHeadersDefined(t *testing.T) { - headers := Headers{} - - headers.CustomRequestHeaders = map[string]string{ - "foo": "bar", - } - - assert.True(t, headers.HasCustomHeadersDefined()) -} - -func TestHeaders_ShouldReturnFalseWhenNotHasSecureHeadersDefined(t *testing.T) { - headers := Headers{} - - assert.False(t, headers.HasSecureHeadersDefined()) -} - -func TestHeaders_ShouldReturnTrueWhenHasSecureHeadersDefined(t *testing.T) { - headers := Headers{} - - headers.SSLRedirect = true - - assert.True(t, headers.HasSecureHeadersDefined()) -} - -func TestNewHTTPCodeRanges(t *testing.T) { - testCases := []struct { - desc string - strBlocks []string - expected HTTPCodeRanges - errExpected bool - }{ - { - desc: "Should return 2 code range", - strBlocks: []string{ - "200-500", - "502", - }, - expected: HTTPCodeRanges{[2]int{200, 500}, [2]int{502, 502}}, - errExpected: false, - }, - { - desc: "Should return 2 code range", - strBlocks: []string{ - "200-500", - "205", - }, - expected: HTTPCodeRanges{[2]int{200, 500}, [2]int{205, 205}}, - errExpected: false, - }, - { - desc: "invalid code range", - strBlocks: []string{ - "200-500", - "aaa", - }, - expected: nil, - errExpected: true, - }, - { - desc: "invalid code range nil", - strBlocks: nil, - expected: nil, - errExpected: false, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - actual, err := 
NewHTTPCodeRanges(test.strBlocks) - assert.Equal(t, test.expected, actual) - if test.errExpected { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -func TestHTTPCodeRanges_Contains(t *testing.T) { - testCases := []struct { - strBlocks []string - statusCode int - contains bool - }{ - { - strBlocks: []string{"200-299"}, - statusCode: 200, - contains: true, - }, - { - strBlocks: []string{"200"}, - statusCode: 200, - contains: true, - }, - { - strBlocks: []string{"201"}, - statusCode: 200, - contains: false, - }, - { - strBlocks: []string{"200-299", "500-599"}, - statusCode: 400, - contains: false, - }, - } - - for _, test := range testCases { - test := test - testName := fmt.Sprintf("%q contains %d", test.strBlocks, test.statusCode) - t.Run(testName, func(t *testing.T) { - t.Parallel() - - httpCodeRanges, err := NewHTTPCodeRanges(test.strBlocks) - assert.NoError(t, err) - - assert.Equal(t, test.contains, httpCodeRanges.Contains(test.statusCode)) - }) - } -} - -func TestIPStrategy_Get(t *testing.T) { - testCases := []struct { - desc string - ipStrategy *IPStrategy - expected ip.Strategy - }{ - { - desc: "IPStrategy is nil", - expected: &ip.RemoteAddrStrategy{}, - }, - { - desc: "IPStrategy is not nil but with no values", - ipStrategy: &IPStrategy{}, - expected: &ip.RemoteAddrStrategy{}, - }, - { - desc: "IPStrategy with Depth", - ipStrategy: &IPStrategy{Depth: 3}, - expected: &ip.DepthStrategy{}, - }, - { - desc: "IPStrategy with ExcludedIPs", - ipStrategy: &IPStrategy{ExcludedIPs: []string{"10.0.0.1"}}, - expected: &ip.CheckerStrategy{}, - }, - { - desc: "IPStrategy with ExcludedIPs and Depth", - ipStrategy: &IPStrategy{Depth: 4, ExcludedIPs: []string{"10.0.0.1"}}, - expected: &ip.DepthStrategy{}, - }, - } - - for _, test := range testCases { - test := test - t.Run(test.desc, func(t *testing.T) { - t.Parallel() - - strategy, err := test.ipStrategy.Get() - require.NoError(t, err) - - assert.IsType(t, test.expected, strategy) - - }) - } -} diff --git a/pkg/collector/collector.go b/pkg/collector/collector.go index a5b1d03f9..e69385fcd 100644 --- a/pkg/collector/collector.go +++ b/pkg/collector/collector.go @@ -9,7 +9,6 @@ import ( "strconv" "time" - "github.com/containous/traefik/old/configuration" "github.com/containous/traefik/pkg/anonymize" "github.com/containous/traefik/pkg/config/static" "github.com/containous/traefik/pkg/log" @@ -63,7 +62,7 @@ func Collect(staticConfiguration *static.Configuration) error { func makeHTTPClient() *http.Client { dialer := &net.Dialer{ - Timeout: configuration.DefaultDialTimeout, + Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, } diff --git a/pkg/config/static/static_config.go b/pkg/config/static/static_config.go index 41bfde88c..ebfeaad45 100644 --- a/pkg/config/static/static_config.go +++ b/pkg/config/static/static_config.go @@ -6,16 +6,6 @@ import ( "time" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/provider/boltdb" - "github.com/containous/traefik/old/provider/consul" - "github.com/containous/traefik/old/provider/consulcatalog" - "github.com/containous/traefik/old/provider/dynamodb" - "github.com/containous/traefik/old/provider/ecs" - "github.com/containous/traefik/old/provider/etcd" - "github.com/containous/traefik/old/provider/eureka" - "github.com/containous/traefik/old/provider/mesos" - "github.com/containous/traefik/old/provider/rancher" - "github.com/containous/traefik/old/provider/zk" "github.com/containous/traefik/pkg/log" "github.com/containous/traefik/pkg/ping" 
acmeprovider "github.com/containous/traefik/pkg/provider/acme" @@ -129,23 +119,13 @@ type Tracing struct { // Providers contains providers configuration type Providers struct { - ProvidersThrottleDuration parse.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time." export:"true"` - Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"` - File *file.Provider `description:"Enable File backend with default settings" export:"true"` - Marathon *marathon.Provider `description:"Enable Marathon backend with default settings" export:"true"` - Consul *consul.Provider `description:"Enable Consul backend with default settings" export:"true"` - ConsulCatalog *consulcatalog.Provider `description:"Enable Consul catalog backend with default settings" export:"true"` - Etcd *etcd.Provider `description:"Enable Etcd backend with default settings" export:"true"` - Zookeeper *zk.Provider `description:"Enable Zookeeper backend with default settings" export:"true"` - Boltdb *boltdb.Provider `description:"Enable Boltdb backend with default settings" export:"true"` - Kubernetes *ingress.Provider `description:"Enable Kubernetes backend with default settings" export:"true"` - KubernetesCRD *crd.Provider `description:"Enable Kubernetes backend with default settings" export:"true"` - Mesos *mesos.Provider `description:"Enable Mesos backend with default settings" export:"true"` - Eureka *eureka.Provider `description:"Enable Eureka backend with default settings" export:"true"` - ECS *ecs.Provider `description:"Enable ECS backend with default settings" export:"true"` - Rancher *rancher.Provider `description:"Enable Rancher backend with default settings" export:"true"` - DynamoDB *dynamodb.Provider `description:"Enable DynamoDB backend with default settings" export:"true"` - Rest *rest.Provider `description:"Enable Rest backend with default settings" export:"true"` + ProvidersThrottleDuration parse.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time." export:"true"` + Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"` + File *file.Provider `description:"Enable File backend with default settings" export:"true"` + Marathon *marathon.Provider `description:"Enable Marathon backend with default settings" export:"true"` + Kubernetes *ingress.Provider `description:"Enable Kubernetes backend with default settings" export:"true"` + KubernetesCRD *crd.Provider `description:"Enable Kubernetes backend with default settings" export:"true"` + Rest *rest.Provider `description:"Enable Rest backend with default settings" export:"true"` } // SetEffectiveConfiguration adds missing configuration parameters derived from existing ones. 
@@ -188,28 +168,6 @@ func (c *Configuration) SetEffectiveConfiguration(configFile string) { } } - if c.Providers.Rancher != nil { - // Ensure backwards compatibility for now - if len(c.Providers.Rancher.AccessKey) > 0 || - len(c.Providers.Rancher.Endpoint) > 0 || - len(c.Providers.Rancher.SecretKey) > 0 { - - if c.Providers.Rancher.API == nil { - c.Providers.Rancher.API = &rancher.APIConfiguration{ - AccessKey: c.Providers.Rancher.AccessKey, - SecretKey: c.Providers.Rancher.SecretKey, - Endpoint: c.Providers.Rancher.Endpoint, - } - } - log.Warn("Deprecated configuration found: rancher.[accesskey|secretkey|endpoint]. " + - "Please use rancher.api.[accesskey|secretkey|endpoint] instead.") - } - - if c.Providers.Rancher.Metadata != nil && len(c.Providers.Rancher.Metadata.Prefix) == 0 { - c.Providers.Rancher.Metadata.Prefix = "latest" - } - } - if c.Providers.Docker != nil { if c.Providers.Docker.SwarmModeRefreshSeconds <= 0 { c.Providers.Docker.SwarmModeRefreshSeconds = 15 diff --git a/pkg/middlewares/accesslog/capture_response_writer.go b/pkg/middlewares/accesslog/capture_response_writer.go index 58fd368c4..200db3c12 100644 --- a/pkg/middlewares/accesslog/capture_response_writer.go +++ b/pkg/middlewares/accesslog/capture_response_writer.go @@ -6,7 +6,7 @@ import ( "net" "net/http" - "github.com/containous/traefik/old/middlewares" + "github.com/containous/traefik/pkg/middlewares" ) var ( diff --git a/pkg/middlewares/accesslog/logger.go b/pkg/middlewares/accesslog/logger.go index 0668acd2f..de184d1f8 100644 --- a/pkg/middlewares/accesslog/logger.go +++ b/pkg/middlewares/accesslog/logger.go @@ -53,7 +53,7 @@ type Handler struct { func WrapHandler(handler *Handler) alice.Constructor { return func(next http.Handler) (http.Handler, error) { return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - handler.ServeHTTP(rw, req, next.ServeHTTP) + handler.ServeHTTP(rw, req, next) }), nil } } @@ -140,7 +140,7 @@ func GetLogData(req *http.Request) *LogData { return nil } -func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.HandlerFunc) { +func (h *Handler) ServeHTTP(rw http.ResponseWriter, req *http.Request, next http.Handler) { now := time.Now().UTC() core := CoreLogData{ diff --git a/pkg/middlewares/accesslog/logger_test.go b/pkg/middlewares/accesslog/logger_test.go index 62d6cef73..2bfbd9224 100644 --- a/pkg/middlewares/accesslog/logger_test.go +++ b/pkg/middlewares/accesslog/logger_test.go @@ -67,7 +67,7 @@ func TestLogRotation(t *testing.T) { writeDone := make(chan bool) go func() { for i := 0; i < iterations; i++ { - logHandler.ServeHTTP(recorder, req, next) + logHandler.ServeHTTP(recorder, req, http.HandlerFunc(next)) if i == iterations/2 { halfDone <- true } @@ -624,7 +624,7 @@ func doLogging(t *testing.T, config *types.AccessLog) { }, } - logger.ServeHTTP(httptest.NewRecorder(), req, logWriterTestHandlerFunc) + logger.ServeHTTP(httptest.NewRecorder(), req, http.HandlerFunc(logWriterTestHandlerFunc)) } func logWriterTestHandlerFunc(rw http.ResponseWriter, r *http.Request) { diff --git a/pkg/middlewares/customerrors/custom_errors.go b/pkg/middlewares/customerrors/custom_errors.go index b7edd63f2..c86ee3416 100644 --- a/pkg/middlewares/customerrors/custom_errors.go +++ b/pkg/middlewares/customerrors/custom_errors.go @@ -11,10 +11,10 @@ import ( "strconv" "strings" - "github.com/containous/traefik/old/types" "github.com/containous/traefik/pkg/config" "github.com/containous/traefik/pkg/middlewares" "github.com/containous/traefik/pkg/tracing" + 
"github.com/containous/traefik/pkg/types" "github.com/opentracing/opentracing-go/ext" "github.com/sirupsen/logrus" "github.com/vulcand/oxy/utils" diff --git a/old/middlewares/pipelining/pipelining.go b/pkg/middlewares/pipelining/pipelining.go similarity index 74% rename from old/middlewares/pipelining/pipelining.go rename to pkg/middlewares/pipelining/pipelining.go index ce06d79c9..cd0340f3f 100644 --- a/old/middlewares/pipelining/pipelining.go +++ b/pkg/middlewares/pipelining/pipelining.go @@ -2,23 +2,32 @@ package pipelining import ( "bufio" + "context" "net" "net/http" + + "github.com/containous/traefik/pkg/middlewares" ) -// Pipelining returns a middleware -type Pipelining struct { +const ( + typeName = "Pipelining" +) + +// pipelining returns a middleware +type pipelining struct { next http.Handler } -// NewPipelining returns a new Pipelining instance -func NewPipelining(next http.Handler) *Pipelining { - return &Pipelining{ +// New returns a new pipelining instance +func New(ctx context.Context, next http.Handler, name string) http.Handler { + middlewares.GetLogger(ctx, name, typeName).Debug("Creating middleware") + + return &pipelining{ next: next, } } -func (p *Pipelining) ServeHTTP(rw http.ResponseWriter, r *http.Request) { +func (p *pipelining) ServeHTTP(rw http.ResponseWriter, r *http.Request) { // https://github.com/golang/go/blob/3d59583836630cf13ec4bfbed977d27b1b7adbdc/src/net/http/server.go#L201-L218 if r.Method == http.MethodPut || r.Method == http.MethodPost { p.next.ServeHTTP(rw, r) diff --git a/old/middlewares/pipelining/pipelining_test.go b/pkg/middlewares/pipelining/pipelining_test.go similarity index 94% rename from old/middlewares/pipelining/pipelining_test.go rename to pkg/middlewares/pipelining/pipelining_test.go index b5b327a41..b26e5fc6b 100644 --- a/old/middlewares/pipelining/pipelining_test.go +++ b/pkg/middlewares/pipelining/pipelining_test.go @@ -1,6 +1,7 @@ package pipelining import ( + "context" "net/http" "net/http/httptest" "testing" @@ -16,7 +17,7 @@ func (r *recorderWithCloseNotify) CloseNotify() <-chan bool { panic("implement me") } -func TestNewPipelining(t *testing.T) { +func TestNew(t *testing.T) { testCases := []struct { desc string HTTPMethod string @@ -59,7 +60,7 @@ func TestNewPipelining(t *testing.T) { assert.Equal(t, test.implementCloseNotifier, ok) w.WriteHeader(http.StatusOK) }) - handler := NewPipelining(nextHandler) + handler := New(context.Background(), nextHandler, "pipe") req := httptest.NewRequest(test.HTTPMethod, "http://localhost", nil) diff --git a/pkg/provider/kubernetes/crd/client.go b/pkg/provider/kubernetes/crd/client.go index fec2c9d58..e17566841 100644 --- a/pkg/provider/kubernetes/crd/client.go +++ b/pkg/provider/kubernetes/crd/client.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "time" - "github.com/containous/traefik/old/log" + "github.com/containous/traefik/pkg/log" "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/clientset/versioned" "github.com/containous/traefik/pkg/provider/kubernetes/crd/generated/informers/externalversions" "github.com/containous/traefik/pkg/provider/kubernetes/crd/traefik/v1alpha1" diff --git a/pkg/provider/kubernetes/ingress/client.go b/pkg/provider/kubernetes/ingress/client.go index 31221912d..61475bb1c 100644 --- a/pkg/provider/kubernetes/ingress/client.go +++ b/pkg/provider/kubernetes/ingress/client.go @@ -6,7 +6,7 @@ import ( "io/ioutil" "time" - "github.com/containous/traefik/old/log" + "github.com/containous/traefik/pkg/log" 
"github.com/containous/traefik/pkg/provider/kubernetes/k8s" corev1 "k8s.io/api/core/v1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" diff --git a/pkg/provider/marathon/marathon.go b/pkg/provider/marathon/marathon.go index 3f0f49d23..5e6268d1a 100644 --- a/pkg/provider/marathon/marathon.go +++ b/pkg/provider/marathon/marathon.go @@ -11,12 +11,12 @@ import ( "github.com/cenkalti/backoff" "github.com/containous/flaeg/parse" - "github.com/containous/traefik/old/types" "github.com/containous/traefik/pkg/config" "github.com/containous/traefik/pkg/job" "github.com/containous/traefik/pkg/log" "github.com/containous/traefik/pkg/provider" "github.com/containous/traefik/pkg/safe" + "github.com/containous/traefik/pkg/types" "github.com/gambol99/go-marathon" "github.com/sirupsen/logrus" ) @@ -120,7 +120,7 @@ func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.P if len(p.DCOSToken) > 0 { confg.DCOSToken = p.DCOSToken } - TLSConfig, err := p.TLS.CreateTLSConfig() + TLSConfig, err := p.TLS.CreateTLSConfig(ctx) if err != nil { return err } diff --git a/pkg/provider/marathon/readiness.go b/pkg/provider/marathon/readiness.go index d6b92b315..af1228ae6 100644 --- a/pkg/provider/marathon/readiness.go +++ b/pkg/provider/marathon/readiness.go @@ -3,7 +3,7 @@ package marathon import ( "time" - "github.com/containous/traefik/old/log" + "github.com/containous/traefik/pkg/log" "github.com/gambol99/go-marathon" ) diff --git a/pkg/server/roundtripper.go b/pkg/server/roundtripper.go index 367c5b4f3..4e928f11d 100644 --- a/pkg/server/roundtripper.go +++ b/pkg/server/roundtripper.go @@ -7,7 +7,6 @@ import ( "net/http" "time" - "github.com/containous/traefik/old/configuration" "github.com/containous/traefik/pkg/config/static" "github.com/containous/traefik/pkg/log" traefiktls "github.com/containous/traefik/pkg/tls" @@ -35,7 +34,7 @@ func createHTTPTransport(transportConfiguration *static.ServersTransport) (*http } dialer := &net.Dialer{ - Timeout: configuration.DefaultDialTimeout, + Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, DualStack: true, } diff --git a/pkg/server/service/service.go b/pkg/server/service/service.go index 577519b4f..da628a3d0 100644 --- a/pkg/server/service/service.go +++ b/pkg/server/service/service.go @@ -9,12 +9,12 @@ import ( "time" "github.com/containous/alice" - "github.com/containous/traefik/old/middlewares/pipelining" "github.com/containous/traefik/pkg/config" "github.com/containous/traefik/pkg/healthcheck" "github.com/containous/traefik/pkg/log" "github.com/containous/traefik/pkg/middlewares/accesslog" "github.com/containous/traefik/pkg/middlewares/emptybackendhandler" + "github.com/containous/traefik/pkg/middlewares/pipelining" "github.com/containous/traefik/pkg/server/cookie" "github.com/containous/traefik/pkg/server/internal" "github.com/vulcand/oxy/roundrobin" @@ -76,7 +76,7 @@ func (m *Manager) getLoadBalancerServiceHandler( return accesslog.NewFieldHandler(next, accesslog.ServiceName, serviceName, accesslog.AddServiceFields), nil } - handler, err := alice.New().Append(alHandler).Then(pipelining.NewPipelining(fwd)) + handler, err := alice.New().Append(alHandler).Then(pipelining.New(ctx, fwd, "pipelining")) if err != nil { return nil, err } @@ -100,10 +100,10 @@ func (m *Manager) LaunchHealthCheck() { for serviceName, balancers := range m.balancers { ctx := log.With(context.Background(), log.Str(log.ServiceName, serviceName)) - // FIXME aggregate + // TODO aggregate balancer := balancers[0] - // FIXME Should all the services handle healthcheck? 
Handle different types + // TODO Should all the services handle healthcheck? Handle different types service := m.configs[serviceName].LoadBalancer // Health Check
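[Editor's aside, not part of the patch: the service wiring above switches from the old pipelining.NewPipelining constructor to the relocated pkg/middlewares/pipelining package, whose constructor is now New(ctx, next, name) and returns an http.Handler. A minimal usage sketch based only on the signature shown in this diff; the backend handler and listen address are illustrative.]

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/containous/traefik/pkg/middlewares/pipelining"
)

func main() {
	// Stand-in for the forwarder that the service manager normally builds.
	backend := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusOK)
	})

	// New takes a context (used for scoped middleware logging) and a
	// middleware name, and returns an http.Handler rather than the old
	// *Pipelining concrete type.
	handler := pipelining.New(context.Background(), backend, "pipelining")

	log.Fatal(http.ListenAndServe(":8080", handler)) // illustrative address
}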