Merge branch v2.3 into master
commit 2112de6f15
36 changed files with 864 additions and 188 deletions
CHANGELOG.md (32 changes)
@@ -1,3 +1,35 @@
+## [v2.3.3](https://github.com/traefik/traefik/tree/v2.3.3) (2020-11-19)
+
+[All Commits](https://github.com/traefik/traefik/compare/v2.3.2...v2.3.3)
+
+**Bug fixes:**
+
+- **[acme]** Update go-acme/lego to v4.1.0 ([#7526](https://github.com/traefik/traefik/pull/7526) by [ldez](https://github.com/ldez))
+- **[consulcatalog,ecs]** Fix missing allow-empty tag on ECS and Consul Catalog providers ([#7561](https://github.com/traefik/traefik/pull/7561) by [jspdown](https://github.com/jspdown))
+- **[consulcatalog]** consulcatalog to update before the first interval ([#7514](https://github.com/traefik/traefik/pull/7514) by [greut](https://github.com/greut))
+- **[consulcatalog]** Fix consul catalog panic when health and services are not in sync ([#7558](https://github.com/traefik/traefik/pull/7558) by [jspdown](https://github.com/jspdown))
+- **[ecs]** Translate configured server port into correct mapped host port ([#7480](https://github.com/traefik/traefik/pull/7480) by [alekitto](https://github.com/alekitto))
+- **[k8s,k8s/crd,k8s/ingress]** Filter out Helm secrets from informer caches ([#7562](https://github.com/traefik/traefik/pull/7562) by [jspdown](https://github.com/jspdown))
+- **[plugins]** Update Yaegi to v0.9.5 ([#7527](https://github.com/traefik/traefik/pull/7527) by [ldez](https://github.com/ldez))
+- **[plugins]** Update Yaegi to v0.9.7 ([#7569](https://github.com/traefik/traefik/pull/7569) by [kevinpollet](https://github.com/kevinpollet))
+- **[plugins]** Update Yaegi to v0.9.4 ([#7451](https://github.com/traefik/traefik/pull/7451) by [ldez](https://github.com/ldez))
+- **[tcp]** Ignore errors when setting keepalive period is not supported by the system ([#7410](https://github.com/traefik/traefik/pull/7410) by [tristan-weil](https://github.com/tristan-weil))
+- **[tcp]** Improve service name lookup on TCP routers ([#7370](https://github.com/traefik/traefik/pull/7370) by [ddtmachado](https://github.com/ddtmachado))
+- Improve anonymize configuration ([#7482](https://github.com/traefik/traefik/pull/7482) by [mmatur](https://github.com/mmatur))
+
+**Documentation:**
+
+- **[ecs]** Add ECS menu to dynamic config reference ([#7501](https://github.com/traefik/traefik/pull/7501) by [kevinpollet](https://github.com/kevinpollet))
+- **[k8s,k8s/ingress]** Fix ingress documentation ([#7424](https://github.com/traefik/traefik/pull/7424) by [rtribotte](https://github.com/rtribotte))
+- **[k8s]** fix documentation ([#7469](https://github.com/traefik/traefik/pull/7469) by [jbdoumenjou](https://github.com/jbdoumenjou))
+- **[k8s]** Fix grammar in kubernetes ingress controller documentation ([#7565](https://github.com/traefik/traefik/pull/7565) by [ivorscott](https://github.com/ivorscott))
+- **[logs]** Clarify time-based field units ([#7447](https://github.com/traefik/traefik/pull/7447) by [tomtastic](https://github.com/tomtastic))
+- **[middleware]** Forwardauth headers ([#7506](https://github.com/traefik/traefik/pull/7506) by [w4tsn](https://github.com/w4tsn))
+- **[provider]** fix typo in providers overview documentation ([#7441](https://github.com/traefik/traefik/pull/7441) by [pirey](https://github.com/pirey))
+- **[tls]** Fix docs for TLS ([#7541](https://github.com/traefik/traefik/pull/7541) by [james426759](https://github.com/james426759))
+- fix: exclude protected link from doc verify ([#7477](https://github.com/traefik/traefik/pull/7477) by [jbdoumenjou](https://github.com/jbdoumenjou))
+- Add missed tls config for yaml example ([#7450](https://github.com/traefik/traefik/pull/7450) by [andrew-demb](https://github.com/andrew-demb))
+- Resolve broken URLs causing make docs to fail ([#7444](https://github.com/traefik/traefik/pull/7444) by [tomtastic](https://github.com/tomtastic))
+- Fix Traefik Proxy product nav in docs ([#7523](https://github.com/traefik/traefik/pull/7523) by [PCM2](https://github.com/PCM2))
+- add links to contributors guide ([#7435](https://github.com/traefik/traefik/pull/7435) by [notsureifkevin](https://github.com/notsureifkevin))
+
 ## [v2.3.2](https://github.com/traefik/traefik/tree/v2.3.2) (2020-10-19)
 
 [All Commits](https://github.com/traefik/traefik/compare/v2.3.1...v2.3.2)
@@ -64,7 +64,7 @@ tls:
 !!! important "Restriction"
 
     Any store definition other than the default one (named `default`) will be ignored,
-    and there is thefore only one globally available TLS store.
+    and there is therefore only one globally available TLS store.
 
 In the `tls.certificates` section, a list of stores can then be specified to indicate where the certificates should be stored:
@@ -61,6 +61,18 @@ http:
     address: "https://example.com/auth"
 ```
 
+## Forward-Request Headers
+
+The following request properties are provided to the forward-auth target endpoint as `X-Forwarded-` headers.
+
+| Property          | Forward-Request Header |
+|-------------------|------------------------|
+| HTTP Method       | X-Forwarded-Method     |
+| Protocol          | X-Forwarded-Proto      |
+| Host              | X-Forwarded-Host       |
+| Request URI       | X-Forwarded-Uri        |
+| Source IP-Address | X-Forwarded-For        |
+
 ## Configuration Options
 
 ### `address`
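To make the header table documented above concrete, here is a minimal sketch of a forward-auth endpoint that inspects the `X-Forwarded-*` headers Traefik sends. This is not part of the commit; the listening port and the allow-list policy are assumptions for illustration. Traefik lets the request through on any 2xx response and blocks it otherwise.

```go
// forwardauth_sketch.go - a hypothetical forward-auth endpoint for illustration.
package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) {
		// Traefik forwards the original request's properties as headers.
		method := r.Header.Get("X-Forwarded-Method")
		proto := r.Header.Get("X-Forwarded-Proto")
		host := r.Header.Get("X-Forwarded-Host")
		uri := r.Header.Get("X-Forwarded-Uri")
		source := r.Header.Get("X-Forwarded-For")

		log.Printf("auth check: %s %s://%s%s from %s", method, proto, host, uri, source)

		// Toy policy: only allow read-only methods (an assumption, not Traefik behavior).
		if method == http.MethodGet || method == http.MethodHead {
			w.WriteHeader(http.StatusOK) // a 2xx response lets the request through
			return
		}
		w.WriteHeader(http.StatusForbidden) // any other status blocks it
	})

	log.Fatal(http.ListenAndServe(":9000", nil))
}
```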
@@ -13,18 +13,15 @@ Attach labels to your ECS containers and let Traefik do the rest!
 
 ```toml tab="File (TOML)"
 [providers.ecs]
-clusters = ["default"]
 ```
 
 ```yaml tab="File (YAML)"
 providers:
-  ecs:
-    clusters:
-      - default
+  ecs: {}
 ```
 
 ```bash tab="CLI"
---providers.ecs.clusters=default
+--providers.ecs=true
 ```
 
 ## Policy
@@ -177,26 +177,32 @@ _Optional, Default: empty (process all resources)_
 
 ```toml tab="File (TOML)"
 [providers.kubernetesCRD]
-  labelselector = "A and not B"
+  labelselector = "app=traefik"
   # ...
 ```
 
 ```yaml tab="File (YAML)"
 providers:
   kubernetesCRD:
-    labelselector: "A and not B"
+    labelselector: "app=traefik"
     # ...
 ```
 
 ```bash tab="CLI"
---providers.kubernetescrd.labelselector="A and not B"
+--providers.kubernetescrd.labelselector="app=traefik"
 ```
 
 By default, Traefik processes all resource objects in the configured namespaces.
-A label selector can be defined to filter on specific resource objects only.
+A label selector can be defined to filter on specific resource objects only;
+it applies only to Traefik [Custom Resources](../routing/providers/kubernetes-crd.md#custom-resource-definition-crd)
+and has no effect on Kubernetes `Secrets`, `Endpoints` and `Services`.
 
 See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details.
 
+!!! warning
+
+    As the LabelSelector is applied to all Traefik Custom Resources, they all must match the filter.
+
 ### `ingressClass`
 
 _Optional, Default: empty_
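As a quick illustration of the selector semantics referenced above, the sketch below (not part of this commit) uses the same `k8s.io/apimachinery` label package the provider uses to validate selectors; the label sets are made up for the example:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "app=traefik" is the selector used in the updated docs and fixtures.
	selector, err := labels.Parse("app=traefik")
	if err != nil {
		panic(err)
	}

	// An IngressRoute labeled app=traefik matches and is processed...
	fmt.Println(selector.Matches(labels.Set{"app": "traefik"})) // true
	// ...while a differently labeled (or unlabeled) one is filtered out.
	fmt.Println(selector.Matches(labels.Set{"app": "other"})) // false
}
```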
@@ -4,7 +4,7 @@ The Kubernetes Ingress Controller.
 {: .subtitle }
 
 The Traefik Kubernetes Ingress provider is a Kubernetes Ingress controller; that is to say,
-it manages access to a cluster services by supporting the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) specification.
+it manages access to cluster services by supporting the [Ingress](https://kubernetes.io/docs/concepts/services-networking/ingress/) specification.
 
 ## Routing Configuration
@@ -212,23 +212,23 @@ _Optional, Default: empty (process all Ingresses)_
 
 ```toml tab="File (TOML)"
 [providers.kubernetesIngress]
-  labelSelector = "A and not B"
+  labelSelector = "app=traefik"
   # ...
 ```
 
 ```yaml tab="File (YAML)"
 providers:
   kubernetesIngress:
-    labelselector: "A and not B"
+    labelselector: "app=traefik"
     # ...
 ```
 
 ```bash tab="CLI"
---providers.kubernetesingress.labelselector="A and not B"
+--providers.kubernetesingress.labelselector="app=traefik"
 ```
 
-By default, Traefik processes all Ingress objects in the configured namespaces.
-A label selector can be defined to filter on specific Ingress objects only.
+By default, Traefik processes all `Ingress` objects in the configured namespaces.
+A label selector can be defined to filter on specific `Ingress` objects only.
 
 See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details.
@@ -330,6 +330,9 @@ TLS key
 `--providers.consul.username`:
 KV Username
 
+`--providers.consulcatalog`:
+Enable ConsulCatalog backend with default settings. (Default: ```false```)
+
 `--providers.consulcatalog.cache`:
 Use local agent caching for catalog reads. (Default: ```false```)
@@ -438,6 +441,9 @@ Use the ip address from the bound port, rather than from the inner network. (Def
 `--providers.docker.watch`:
 Watch Docker Swarm events. (Default: ```true```)
 
+`--providers.ecs`:
+Enable AWS ECS backend with default settings. (Default: ```false```)
+
 `--providers.ecs.accesskeyid`:
 The AWS credentials access key to use for making requests
@@ -303,6 +303,9 @@ Terminating status code (Default: ```503```)
 `TRAEFIK_PROVIDERS_CONSUL`:
 Enable Consul backend with default settings. (Default: ```false```)
 
+`TRAEFIK_PROVIDERS_CONSULCATALOG`:
+Enable ConsulCatalog backend with default settings. (Default: ```false```)
+
 `TRAEFIK_PROVIDERS_CONSULCATALOG_CACHE`:
 Use local agent caching for catalog reads. (Default: ```false```)
@@ -438,6 +441,9 @@ Use the ip address from the bound port, rather than from the inner network. (Def
 `TRAEFIK_PROVIDERS_DOCKER_WATCH`:
 Watch Docker Swarm events. (Default: ```true```)
 
+`TRAEFIK_PROVIDERS_ECS`:
+Enable AWS ECS backend with default settings. (Default: ```false```)
+
 `TRAEFIK_PROVIDERS_ECS_ACCESSKEYID`:
 The AWS credentials access key to use for making requests
go.mod (2 changes)
@@ -72,7 +72,7 @@ require (
 	github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154
 	github.com/tinylib/msgp v1.0.2 // indirect
 	github.com/traefik/paerser v0.1.0
-	github.com/traefik/yaegi v0.9.5
+	github.com/traefik/yaegi v0.9.7
 	github.com/uber/jaeger-client-go v2.25.0+incompatible
 	github.com/uber/jaeger-lib v2.2.0+incompatible
 	github.com/unrolled/render v1.0.2
go.sum (4 changes)
@@ -788,8 +788,8 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/traefik/paerser v0.1.0 h1:B4v1tbvd8YnHsA7spwHKEWJoGrRP+2jYpIozsCMHhl0=
 github.com/traefik/paerser v0.1.0/go.mod h1:yYnAgdEC2wJH5CgG75qGWC8SsFDEapg09o9RrA6FfrE=
-github.com/traefik/yaegi v0.9.5 h1:mRJtmV6t/wecIq6Gfs0DpdSC2AjFpPRjoiBXP03OIz0=
-github.com/traefik/yaegi v0.9.5/go.mod h1:FAYnRlZyuVlEkvnkHq3bvJ1lW5be6XuwgLdkYgYG6Lk=
+github.com/traefik/yaegi v0.9.7 h1:CbeKjEhy3DoSC8xC4TQF2Mhmd7u3Cjqluz1//x6Vtcs=
+github.com/traefik/yaegi v0.9.7/go.mod h1:FAYnRlZyuVlEkvnkHq3bvJ1lW5be6XuwgLdkYgYG6Lk=
 github.com/transip/gotransip/v6 v6.2.0 h1:0Z+qVsyeiQdWfcAUeJyF0IEKAPvhJwwpwPi2WGtBIiE=
 github.com/transip/gotransip/v6 v6.2.0/go.mod h1:pQZ36hWWRahCUXkFWlx9Hs711gLd8J4qdgLdRzmtY+g=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
@@ -3,6 +3,8 @@ kind: Ingress
 metadata:
   name: test.ingress
   namespace: default
+  labels:
+    app: traefik
 
 spec:
   rules:
@@ -3,6 +3,8 @@ kind: IngressRoute
 metadata:
   name: test.route
   namespace: default
+  labels:
+    app: traefik
 
 spec:
   entryPoints:
@@ -3,6 +3,8 @@ kind: TLSOption
 metadata:
   name: mytlsoption
   namespace: default
+  labels:
+    app: traefik
 
 spec:
   minVersion: VersionTLS12
@@ -3,6 +3,8 @@ kind: TLSStore
 metadata:
   name: mytlsstore
   namespace: default
+  labels:
+    app: traefik
 
 spec:
   defaultCertificate:
@@ -50,6 +50,8 @@ kind: IngressRoute
 metadata:
   name: api.route
   namespace: default
+  labels:
+    app: traefik
 
 spec:
   entryPoints:
integration/fixtures/k8s_crd_label_selector.toml (new file, 19 additions)
@@ -0,0 +1,19 @@
+[global]
+  checkNewVersion = false
+  sendAnonymousUsage = false
+
+[log]
+  level = "DEBUG"
+
+[api]
+
+[entryPoints]
+  [entryPoints.footcp]
+    address = ":8093"
+  [entryPoints.fooudp]
+    address = ":8090/udp"
+  [entryPoints.web]
+    address = ":8000"
+
+[providers.kubernetesCRD]
+  labelSelector = "app=traefik"
integration/fixtures/k8s_ingress_label_selector.toml (new file, 16 additions)
@@ -0,0 +1,16 @@
+[global]
+  checkNewVersion = false
+  sendAnonymousUsage = false
+
+[log]
+  level = "DEBUG"
+
+[api]
+  insecure = true
+
+[entryPoints]
+  [entryPoints.web]
+    address = ":8000"
+
+[providers.kubernetesIngress]
+  labelSelector = "app=traefik"
@@ -74,6 +74,17 @@ func (s *K8sSuite) TestIngressConfiguration(c *check.C) {
 	testConfiguration(c, "testdata/rawdata-ingress.json", "8080")
 }
 
+func (s *K8sSuite) TestIngressLabelSelector(c *check.C) {
+	cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_ingress_label_selector.toml"))
+	defer display(c)
+
+	err := cmd.Start()
+	c.Assert(err, checker.IsNil)
+	defer s.killCmd(cmd)
+
+	testConfiguration(c, "testdata/rawdata-ingress-label-selector.json", "8080")
+}
+
 func (s *K8sSuite) TestCRDConfiguration(c *check.C) {
 	cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd.toml"))
 	defer display(c)

@@ -85,6 +96,17 @@ func (s *K8sSuite) TestCRDConfiguration(c *check.C) {
 	testConfiguration(c, "testdata/rawdata-crd.json", "8000")
 }
 
+func (s *K8sSuite) TestCRDLabelSelector(c *check.C) {
+	cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd_label_selector.toml"))
+	defer display(c)
+
+	err := cmd.Start()
+	c.Assert(err, checker.IsNil)
+	defer s.killCmd(cmd)
+
+	testConfiguration(c, "testdata/rawdata-crd-label-selector.json", "8000")
+}
+
 func testConfiguration(c *check.C, path, apiPort string) {
 	err := try.GetRequest("http://127.0.0.1:"+apiPort+"/api/entrypoints", 20*time.Second, try.BodyContains(`"name":"web"`))
 	c.Assert(err, checker.IsNil)
integration/testdata/rawdata-crd-label-selector.json (new file, vendored, 65 additions)
@@ -0,0 +1,65 @@
+{
+  "routers": {
+    "default-api-route-29f28a463fb5d5ba16d2@kubernetescrd": {
+      "entryPoints": [
+        "web"
+      ],
+      "service": "api@internal",
+      "rule": "PathPrefix(`/api`)",
+      "status": "enabled",
+      "using": [
+        "web"
+      ]
+    },
+    "default-test-route-6b204d94623b3df4370c@kubernetescrd": {
+      "entryPoints": [
+        "web"
+      ],
+      "service": "default-test-route-6b204d94623b3df4370c",
+      "rule": "Host(`foo.com`) \u0026\u0026 PathPrefix(`/bar`)",
+      "priority": 12,
+      "tls": {
+        "options": "default-mytlsoption"
+      },
+      "status": "enabled",
+      "using": [
+        "web"
+      ]
+    }
+  },
+  "services": {
+    "api@internal": {
+      "status": "enabled",
+      "usedBy": [
+        "default-api-route-29f28a463fb5d5ba16d2@kubernetescrd"
+      ]
+    },
+    "dashboard@internal": {
+      "status": "enabled"
+    },
+    "default-test-route-6b204d94623b3df4370c@kubernetescrd": {
+      "loadBalancer": {
+        "servers": [
+          {
+            "url": "http://10.42.0.3:80"
+          },
+          {
+            "url": "http://10.42.0.4:80"
+          }
+        ],
+        "passHostHeader": true
+      },
+      "status": "enabled",
+      "usedBy": [
+        "default-test-route-6b204d94623b3df4370c@kubernetescrd"
+      ],
+      "serverStatus": {
+        "http://10.42.0.3:80": "UP",
+        "http://10.42.0.4:80": "UP"
+      }
+    },
+    "noop@internal": {
+      "status": "enabled"
+    }
+  }
+}
integration/testdata/rawdata-ingress-label-selector.json (new file, vendored, 106 additions)
@@ -0,0 +1,106 @@
+{
+  "routers": {
+    "api@internal": {
+      "entryPoints": [
+        "traefik"
+      ],
+      "service": "api@internal",
+      "rule": "PathPrefix(`/api`)",
+      "priority": 2147483646,
+      "status": "enabled",
+      "using": [
+        "traefik"
+      ]
+    },
+    "dashboard@internal": {
+      "entryPoints": [
+        "traefik"
+      ],
+      "middlewares": [
+        "dashboard_redirect@internal",
+        "dashboard_stripprefix@internal"
+      ],
+      "service": "dashboard@internal",
+      "rule": "PathPrefix(`/`)",
+      "priority": 2147483645,
+      "status": "enabled",
+      "using": [
+        "traefik"
+      ]
+    },
+    "test-ingress-default-whoami-test-whoami@kubernetes": {
+      "entryPoints": [
+        "web"
+      ],
+      "service": "default-whoami-http",
+      "rule": "Host(`whoami.test`) \u0026\u0026 PathPrefix(`/whoami`)",
+      "status": "enabled",
+      "using": [
+        "web"
+      ]
+    }
+  },
+  "middlewares": {
+    "dashboard_redirect@internal": {
+      "redirectRegex": {
+        "regex": "^(http:\\/\\/(\\[[\\w:.]+\\]|[\\w\\._-]+)(:\\d+)?)\\/$",
+        "replacement": "${1}/dashboard/",
+        "permanent": true
+      },
+      "status": "enabled",
+      "usedBy": [
+        "dashboard@internal"
+      ]
+    },
+    "dashboard_stripprefix@internal": {
+      "stripPrefix": {
+        "prefixes": [
+          "/dashboard/",
+          "/dashboard"
+        ]
+      },
+      "status": "enabled",
+      "usedBy": [
+        "dashboard@internal"
+      ]
+    }
+  },
+  "services": {
+    "api@internal": {
+      "status": "enabled",
+      "usedBy": [
+        "api@internal"
+      ]
+    },
+    "dashboard@internal": {
+      "status": "enabled",
+      "usedBy": [
+        "dashboard@internal"
+      ]
+    },
+    "default-whoami-http@kubernetes": {
+      "loadBalancer": {
+        "servers": [
+          {
+            "url": "http://10.42.0.2:80"
+          },
+          {
+            "url": "http://10.42.0.7:80"
+          }
+        ],
+        "passHostHeader": true
+      },
+      "status": "enabled",
+      "usedBy": [
+        "test-ingress-default-whoami-test-whoami@kubernetes"
+      ],
+      "serverStatus": {
+        "http://10.42.0.2:80": "UP",
+        "http://10.42.0.7:80": "UP"
+      }
+    },
+    "noop@internal": {
+      "status": "enabled"
+    }
+  }
+}
@@ -176,8 +176,8 @@ type Providers struct {
 	KubernetesCRD *crd.Provider           `description:"Enable Kubernetes backend with default settings." json:"kubernetesCRD,omitempty" toml:"kubernetesCRD,omitempty" yaml:"kubernetesCRD,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"`
 	Rest          *rest.Provider          `description:"Enable Rest backend with default settings." json:"rest,omitempty" toml:"rest,omitempty" yaml:"rest,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"`
 	Rancher       *rancher.Provider       `description:"Enable Rancher backend with default settings." json:"rancher,omitempty" toml:"rancher,omitempty" yaml:"rancher,omitempty" export:"true" label:"allowEmpty" file:"allowEmpty"`
-	ConsulCatalog *consulcatalog.Provider `description:"Enable ConsulCatalog backend with default settings." json:"consulCatalog,omitempty" toml:"consulCatalog,omitempty" yaml:"consulCatalog,omitempty" export:"true"`
-	Ecs           *ecs.Provider           `description:"Enable AWS ECS backend with default settings." json:"ecs,omitempty" toml:"ecs,omitempty" yaml:"ecs,omitempty" export:"true"`
+	ConsulCatalog *consulcatalog.Provider `description:"Enable ConsulCatalog backend with default settings." json:"consulCatalog,omitempty" toml:"consulCatalog,omitempty" yaml:"consulCatalog,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
+	Ecs           *ecs.Provider           `description:"Enable AWS ECS backend with default settings." json:"ecs,omitempty" toml:"ecs,omitempty" yaml:"ecs,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
 
 	Consul        *consul.Provider        `description:"Enable Consul backend with default settings." json:"consul,omitempty" toml:"consul,omitempty" yaml:"consul,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
 	Etcd          *etcd.Provider          `description:"Enable Etcd backend with default settings." json:"etcd,omitempty" toml:"etcd,omitempty" yaml:"etcd,omitempty" label:"allowEmpty" file:"allowEmpty" export:"true"`
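The only functional change above is the addition of `label:"allowEmpty" file:"allowEmpty"` tags on the `ConsulCatalog` and `Ecs` fields, so both providers can be enabled with an empty element (see the ECS documentation change earlier in this commit). As a rough sketch of how such struct tags can be read with plain `reflect` (this is not the actual paerser machinery, just an illustration of the mechanism):

```go
package main

import (
	"fmt"
	"reflect"
)

// A trimmed-down stand-in for the static configuration's Providers struct.
type providers struct {
	Ecs *struct{} `description:"Enable AWS ECS backend with default settings." label:"allowEmpty" file:"allowEmpty"`
}

func main() {
	field, _ := reflect.TypeOf(providers{}).FieldByName("Ecs")
	// A configuration loader can check this tag to decide whether an empty
	// element like `[providers.ecs]` or `--providers.ecs=true` is enough
	// to enable the provider with its defaults.
	fmt.Println(field.Tag.Get("label")) // "allowEmpty"
	fmt.Println(field.Tag.Get("file"))  // "allowEmpty"
}
```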
@@ -108,27 +108,28 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 	p.client, err = createClient(p.Endpoint)
 	if err != nil {
-		return fmt.Errorf("error create consul client, %w", err)
+		return fmt.Errorf("unable to create consul client: %w", err)
 	}
 
+	// get configuration at the provider's startup.
+	err = p.loadConfiguration(routineCtx, configurationChan)
+	if err != nil {
+		return fmt.Errorf("failed to get consul catalog data: %w", err)
+	}
+
+	// Periodic refreshes.
 	ticker := time.NewTicker(time.Duration(p.RefreshInterval))
+	defer ticker.Stop()
 
 	for {
 		select {
 		case <-ticker.C:
-			data, err := p.getConsulServicesData(routineCtx)
+			err = p.loadConfiguration(routineCtx, configurationChan)
 			if err != nil {
-				logger.Errorf("error get consul catalog data, %v", err)
-				return err
+				return fmt.Errorf("failed to refresh consul catalog data: %w", err)
 			}
-
-			configuration := p.buildConfiguration(routineCtx, data)
-			configurationChan <- dynamic.Message{
-				ProviderName:  "consulcatalog",
-				Configuration: configuration,
-			}
 		case <-routineCtx.Done():
-			ticker.Stop()
 			return nil
 		}
 	}

@@ -147,6 +148,20 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 	return nil
 }
 
+func (p *Provider) loadConfiguration(ctx context.Context, configurationChan chan<- dynamic.Message) error {
+	data, err := p.getConsulServicesData(ctx)
+	if err != nil {
+		return err
+	}
+
+	configurationChan <- dynamic.Message{
+		ProviderName:  "consulcatalog",
+		Configuration: p.buildConfiguration(ctx, data),
+	}
+
+	return nil
+}
+
 func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error) {
 	consulServiceNames, err := p.fetchServices(ctx)
 	if err != nil {

@@ -155,17 +170,22 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
 
 	var data []itemData
 	for _, name := range consulServiceNames {
-		consulServices, healthServices, err := p.fetchService(ctx, name)
+		consulServices, statuses, err := p.fetchService(ctx, name)
 		if err != nil {
 			return nil, err
 		}
 
-		for i, consulService := range consulServices {
+		for _, consulService := range consulServices {
 			address := consulService.ServiceAddress
 			if address == "" {
 				address = consulService.Address
 			}
 
+			status, exists := statuses[consulService.ID+consulService.ServiceID]
+			if !exists {
+				status = api.HealthAny
+			}
+
 			item := itemData{
 				ID:   consulService.ServiceID,
 				Node: consulService.Node,

@@ -174,7 +194,7 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
 				Port:   strconv.Itoa(consulService.ServicePort),
 				Labels: tagsToNeutralLabels(consulService.ServiceTags, p.Prefix),
 				Tags:   consulService.ServiceTags,
-				Status: healthServices[i].Checks.AggregatedStatus(),
+				Status: status,
 			}
 
 			extraConf, err := p.getConfiguration(item)

@@ -190,13 +210,14 @@ func (p *Provider) getConsulServicesData(ctx context.Context) ([]itemData, error
 	return data, nil
 }
 
-func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, []*api.ServiceEntry, error) {
+func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.CatalogService, map[string]string, error) {
 	var tagFilter string
 	if !p.ExposedByDefault {
 		tagFilter = p.Prefix + ".enable=true"
 	}
 
 	opts := &api.QueryOptions{AllowStale: p.Stale, RequireConsistent: p.RequireConsistent, UseCache: p.Cache}
+	opts = opts.WithContext(ctx)
 
 	consulServices, _, err := p.client.Catalog().Service(name, tagFilter, opts)
 	if err != nil {

@@ -204,7 +225,22 @@ func (p *Provider) fetchService(ctx context.Context, name string) ([]*api.Catalo
 	}
 
 	healthServices, _, err := p.client.Health().Service(name, tagFilter, false, opts)
-	return consulServices, healthServices, err
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Index status by service and node so it can be retrieved from a CatalogService even if the health and services
+	// are not in sync.
+	statuses := make(map[string]string)
+	for _, health := range healthServices {
+		if health.Service == nil || health.Node == nil {
+			continue
+		}
+
+		statuses[health.Node.ID+health.Service.ID] = health.Checks.AggregatedStatus()
+	}
+
+	return consulServices, statuses, err
 }
 
 func (p *Provider) fetchServices(ctx context.Context) ([]string, error) {
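The panic fix above replaces positional lookups (`healthServices[i]`) with a map keyed by node ID plus service ID. A standalone sketch of the idea (hypothetical types, not Traefik code) shows why this avoids a panic when the two Consul result sets are not in sync:

```go
package main

import "fmt"

// Simplified stand-ins for Consul's CatalogService and ServiceEntry types.
type catalogService struct{ NodeID, ServiceID string }
type healthEntry struct{ NodeID, ServiceID, Status string }

func main() {
	services := []catalogService{{"node1", "svc1"}, {"node2", "svc2"}}
	// Health results can lag behind the catalog: here "svc2" is missing,
	// so services[1] has no positional counterpart (healthEntries[1] would panic).
	healthEntries := []healthEntry{{"node1", "svc1", "passing"}}

	// Index statuses by a composite key instead of relying on slice positions.
	statuses := make(map[string]string)
	for _, h := range healthEntries {
		statuses[h.NodeID+h.ServiceID] = h.Status
	}

	for _, s := range services {
		status, ok := statuses[s.NodeID+s.ServiceID]
		if !ok {
			status = "any" // fall back, as the provider falls back to api.HealthAny
		}
		fmt.Println(s.ServiceID, status)
	}
}
```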
@@ -307,6 +307,13 @@ func (p Provider) getIPAddress(instance ecsInstance) string {
 
 func getPort(instance ecsInstance, serverPort string) string {
 	if len(serverPort) > 0 {
+		for _, port := range instance.machine.ports {
+			containerPort := strconv.FormatInt(port.containerPort, 10)
+			if serverPort == containerPort {
+				return strconv.FormatInt(port.hostPort, 10)
+			}
+		}
+
 		return serverPort
 	}
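In effect, the change above makes a `server.port` label that names a container port resolve to the host port it is mapped to, instead of being used verbatim. A self-contained sketch of the same lookup (hypothetical types, mirroring but not reusing the unexported Traefik helpers):

```go
package main

import (
	"fmt"
	"strconv"
)

// portMapping mimics the ECS provider's view of a container port binding.
type portMapping struct{ containerPort, hostPort int64 }

// resolvePort returns the host port bound to the configured container port,
// or the configured value unchanged when no mapping matches.
func resolvePort(mappings []portMapping, serverPort string) string {
	for _, m := range mappings {
		if serverPort == strconv.FormatInt(m.containerPort, 10) {
			return strconv.FormatInt(m.hostPort, 10)
		}
	}
	return serverPort
}

func main() {
	mappings := []portMapping{{containerPort: 80, hostPort: 8080}}
	fmt.Println(resolvePort(mappings, "80"))   // "8080": container port 80 is published on host port 8080
	fmt.Println(resolvePort(mappings, "8040")) // "8040": unmapped ports are kept as configured
}
```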
@@ -1721,13 +1721,13 @@ func Test_buildConfiguration(t *testing.T) {
 					name("Test"),
 					labels(map[string]string{
 						"traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c",
-						"traefik.http.services.Service1.LoadBalancer.server.port":   "8080",
+						"traefik.http.services.Service1.LoadBalancer.server.port":   "80",
 					}),
 					iMachine(
 						mState(ec2.InstanceStateNameRunning),
 						mPrivateIP("127.0.0.1"),
 						mPorts(
-							mPort(0, 80, "tcp"),
+							mPort(80, 8080, "tcp"),
 						),
 					),
 				),
@@ -1764,6 +1764,125 @@ func Test_buildConfiguration(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc: "one container with label port not exposed by container",
+			containers: []ecsInstance{
+				instance(
+					name("Test"),
+					labels(map[string]string{
+						"traefik.http.services.Service1.LoadBalancer.server.scheme": "h2c",
+						"traefik.http.services.Service1.LoadBalancer.server.port":   "8040",
+					}),
+					iMachine(
+						mState(ec2.InstanceStateNameRunning),
+						mPrivateIP("127.0.0.1"),
+						mPorts(
+							mPort(80, 8080, "tcp"),
+						),
+					),
+				),
+			},
+			expected: &dynamic.Configuration{
+				TCP: &dynamic.TCPConfiguration{
+					Routers:  map[string]*dynamic.TCPRouter{},
+					Services: map[string]*dynamic.TCPService{},
+				},
+				UDP: &dynamic.UDPConfiguration{
+					Routers:  map[string]*dynamic.UDPRouter{},
+					Services: map[string]*dynamic.UDPService{},
+				},
+				HTTP: &dynamic.HTTPConfiguration{
+					Routers: map[string]*dynamic.Router{
+						"Test": {
+							Service: "Service1",
+							Rule:    "Host(`Test.traefik.wtf`)",
+						},
+					},
+					Middlewares: map[string]*dynamic.Middleware{},
+					Services: map[string]*dynamic.Service{
+						"Service1": {
+							LoadBalancer: &dynamic.ServersLoadBalancer{
+								Servers: []dynamic.Server{
+									{
+										URL: "h2c://127.0.0.1:8040",
+									},
+								},
+								PassHostHeader: Bool(true),
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			desc: "one container with label and multiple ports",
+			containers: []ecsInstance{
+				instance(
+					name("Test"),
+					labels(map[string]string{
+						"traefik.http.routers.Test.rule":                          "Host(`Test.traefik.wtf`)",
+						"traefik.http.routers.Test.service":                       "Service1",
+						"traefik.http.services.Service1.LoadBalancer.server.port": "4445",
+						"traefik.http.routers.Test2.rule":                         "Host(`Test.traefik.local`)",
+						"traefik.http.routers.Test2.service":                      "Service2",
+						"traefik.http.services.Service2.LoadBalancer.server.port": "4444",
+					}),
+					iMachine(
+						mState(ec2.InstanceStateNameRunning),
+						mPrivateIP("127.0.0.1"),
+						mPorts(
+							mPort(4444, 32123, "tcp"),
+							mPort(4445, 32124, "tcp"),
+						),
+					),
+				),
+			},
+			expected: &dynamic.Configuration{
+				TCP: &dynamic.TCPConfiguration{
+					Routers:  map[string]*dynamic.TCPRouter{},
+					Services: map[string]*dynamic.TCPService{},
+				},
+				UDP: &dynamic.UDPConfiguration{
+					Routers:  map[string]*dynamic.UDPRouter{},
+					Services: map[string]*dynamic.UDPService{},
+				},
+				HTTP: &dynamic.HTTPConfiguration{
+					Routers: map[string]*dynamic.Router{
+						"Test": {
+							Service: "Service1",
+							Rule:    "Host(`Test.traefik.wtf`)",
+						},
+						"Test2": {
+							Service: "Service2",
+							Rule:    "Host(`Test.traefik.local`)",
+						},
+					},
+					Middlewares: map[string]*dynamic.Middleware{},
+					Services: map[string]*dynamic.Service{
+						"Service1": {
+							LoadBalancer: &dynamic.ServersLoadBalancer{
+								Servers: []dynamic.Server{
+									{
+										URL: "http://127.0.0.1:32124",
+									},
+								},
+								PassHostHeader: Bool(true),
+							},
+						},
+						"Service2": {
+							LoadBalancer: &dynamic.ServersLoadBalancer{
+								Servers: []dynamic.Server{
+									{
+										URL: "http://127.0.0.1:32123",
+									},
+								},
+								PassHostHeader: Bool(true),
+							},
+						},
+					},
+				},
+			},
+		},
 		{
 			desc: "one container with label port on two services",
 			containers: []ecsInstance{
@@ -2274,13 +2393,13 @@ func Test_buildConfiguration(t *testing.T) {
 					labels(map[string]string{
 						"traefik.tcp.routers.foo.rule":                      "HostSNI(`foo.bar`)",
 						"traefik.tcp.routers.foo.tls.options":               "foo",
-						"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
+						"traefik.tcp.services.foo.loadbalancer.server.port": "80",
 					}),
 					iMachine(
 						mState(ec2.InstanceStateNameRunning),
 						mPrivateIP("127.0.0.1"),
 						mPorts(
-							mPort(0, 80, "tcp"),
+							mPort(80, 8080, "tcp"),
 						),
 					),
 				),
@@ -2327,13 +2446,13 @@ func Test_buildConfiguration(t *testing.T) {
 					name("Test"),
 					labels(map[string]string{
 						"traefik.udp.routers.foo.entrypoints":               "mydns",
-						"traefik.udp.services.foo.loadbalancer.server.port": "8080",
+						"traefik.udp.services.foo.loadbalancer.server.port": "80",
 					}),
 					iMachine(
 						mState(ec2.InstanceStateNameRunning),
 						mPrivateIP("127.0.0.1"),
 						mPorts(
-							mPort(0, 80, "udp"),
+							mPort(80, 8080, "udp"),
 						),
 					),
 				),
@@ -2506,14 +2625,14 @@ func Test_buildConfiguration(t *testing.T) {
 				instance(
 					name("Test"),
 					labels(map[string]string{
-						"traefik.tcp.services.foo.loadbalancer.server.port":     "8080",
+						"traefik.tcp.services.foo.loadbalancer.server.port":     "80",
 						"traefik.tcp.services.foo.loadbalancer.terminationdelay": "200",
 					}),
 					iMachine(
 						mState(ec2.InstanceStateNameRunning),
 						mPrivateIP("127.0.0.1"),
 						mPorts(
-							mPort(0, 80, "tcp"),
+							mPort(80, 8080, "tcp"),
 						),
 					),
 				),
@@ -151,35 +151,25 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P
 	operation := func() error {
 		awsClient, err := p.createClient(logger)
 		if err != nil {
-			return err
+			return fmt.Errorf("unable to create AWS client: %w", err)
 		}
 
-		configuration, err := p.loadECSConfig(ctxLog, awsClient)
+		err = p.loadConfiguration(ctxLog, awsClient, configurationChan)
 		if err != nil {
-			return err
+			return fmt.Errorf("failed to get ECS configuration: %w", err)
 		}
 
-		configurationChan <- dynamic.Message{
-			ProviderName:  "ecs",
-			Configuration: configuration,
-		}
-
-		reload := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
-		defer reload.Stop()
+		ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
+		defer ticker.Stop()
 
 		for {
 			select {
-			case <-reload.C:
-				configuration, err := p.loadECSConfig(ctxLog, awsClient)
+			case <-ticker.C:
+				err = p.loadConfiguration(ctxLog, awsClient, configurationChan)
 				if err != nil {
-					logger.Errorf("Failed to load ECS configuration, error %s", err)
-					return err
+					return fmt.Errorf("failed to refresh ECS configuration: %w", err)
 				}
-
-				configurationChan <- dynamic.Message{
-					ProviderName:  "ecs",
-					Configuration: configuration,
-				}
 			case <-routineCtx.Done():
 				return nil
 			}

@@ -198,6 +188,20 @@ func (p Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.P
 	return nil
 }
 
+func (p *Provider) loadConfiguration(ctx context.Context, client *awsClient, configurationChan chan<- dynamic.Message) error {
+	instances, err := p.listInstances(ctx, client)
+	if err != nil {
+		return err
+	}
+
+	configurationChan <- dynamic.Message{
+		ProviderName:  "ecs",
+		Configuration: p.buildConfiguration(ctx, instances),
+	}
+
+	return nil
+}
+
 // Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels)
 // and the EC2 instance data.
 func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) {

@@ -365,15 +369,6 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
 	return instances, nil
 }
 
-func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*dynamic.Configuration, error) {
-	instances, err := p.listInstances(ctx, client)
-	if err != nil {
-		return nil, err
-	}
-
-	return p.buildConfiguration(ctx, instances), nil
-}
-
 func (p *Provider) lookupEc2Instances(ctx context.Context, client *awsClient, clusterName *string, ecsDatas map[string]*ecs.Task) (map[string]*ec2.Instance, error) {
 	logger := log.FromContext(ctx)
 	instanceIds := make(map[string]string)
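Both the Consul Catalog and ECS providers now follow the same shape: push one configuration immediately at startup, then refresh on a ticker until the context is cancelled. A generic sketch of that pattern (illustrative names, not the Traefik API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runProvider loads a configuration once at startup, then refreshes it on
// every tick, so consumers are not left empty until the first interval.
func runProvider(ctx context.Context, interval time.Duration, load func() (string, error), out chan<- string) error {
	push := func() error {
		cfg, err := load()
		if err != nil {
			return err
		}
		out <- cfg
		return nil
	}

	// Initial load at startup (the behavior PR #7514 adds).
	if err := push(); err != nil {
		return fmt.Errorf("failed to get initial configuration: %w", err)
	}

	ticker := time.NewTicker(interval)
	defer ticker.Stop() // defer replaces the manual Stop in the Done case

	for {
		select {
		case <-ticker.C:
			if err := push(); err != nil {
				return fmt.Errorf("failed to refresh configuration: %w", err)
			}
		case <-ctx.Done():
			return nil
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()

	out := make(chan string, 8)
	load := func() (string, error) { return "config@" + time.Now().Format("15:04:05.000"), nil }

	go func() {
		for cfg := range out {
			fmt.Println(cfg)
		}
	}()

	_ = runProvider(ctx, 100*time.Millisecond, load, out)
}
```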
@@ -21,7 +21,6 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 )
@@ -66,13 +65,14 @@ type Client interface {
 
 // TODO: add tests for the clientWrapper (and its methods) itself.
 type clientWrapper struct {
-	csCrd  *versioned.Clientset
-	csKube *kubernetes.Clientset
+	csCrd  versioned.Interface
+	csKube kubernetes.Interface
 
-	factoriesCrd  map[string]externalversions.SharedInformerFactory
-	factoriesKube map[string]informers.SharedInformerFactory
+	factoriesCrd    map[string]externalversions.SharedInformerFactory
+	factoriesKube   map[string]informers.SharedInformerFactory
+	factoriesSecret map[string]informers.SharedInformerFactory
 
-	labelSelector labels.Selector
+	labelSelector string
 
 	isNamespaceAll    bool
 	watchedNamespaces []string
@@ -100,12 +100,13 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
 	return newClientImpl(csKube, csCrd), nil
 }
 
-func newClientImpl(csKube *kubernetes.Clientset, csCrd *versioned.Clientset) *clientWrapper {
+func newClientImpl(csKube kubernetes.Interface, csCrd versioned.Interface) *clientWrapper {
 	return &clientWrapper{
-		csCrd:         csCrd,
-		csKube:        csKube,
-		factoriesCrd:  make(map[string]externalversions.SharedInformerFactory),
-		factoriesKube: make(map[string]informers.SharedInformerFactory),
+		csCrd:           csCrd,
+		csKube:          csKube,
+		factoriesCrd:    make(map[string]externalversions.SharedInformerFactory),
+		factoriesKube:   make(map[string]informers.SharedInformerFactory),
+		factoriesSecret: make(map[string]informers.SharedInformerFactory),
 	}
 }
@@ -160,16 +161,25 @@ func newExternalClusterClient(endpoint, token, caFilePath string) (*clientWrappe
 // WatchAll starts namespace-specific controllers for all relevant kinds.
 func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
 	eventCh := make(chan interface{}, 1)
-	eventHandler := c.newResourceEventHandler(eventCh)
+	eventHandler := &resourceEventHandler{ev: eventCh}
 
 	if len(namespaces) == 0 {
 		namespaces = []string{metav1.NamespaceAll}
 		c.isNamespaceAll = true
 	}
 
 	c.watchedNamespaces = namespaces
 
+	notOwnedByHelm := func(opts *metav1.ListOptions) {
+		opts.LabelSelector = "owner!=helm"
+	}
+
+	matchesLabelSelector := func(opts *metav1.ListOptions) {
+		opts.LabelSelector = c.labelSelector
+	}
+
 	for _, ns := range namespaces {
-		factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns))
+		factoryCrd := externalversions.NewSharedInformerFactoryWithOptions(c.csCrd, resyncPeriod, externalversions.WithNamespace(ns), externalversions.WithTweakListOptions(matchesLabelSelector))
 		factoryCrd.Traefik().V1alpha1().IngressRoutes().Informer().AddEventHandler(eventHandler)
 		factoryCrd.Traefik().V1alpha1().Middlewares().Informer().AddEventHandler(eventHandler)
 		factoryCrd.Traefik().V1alpha1().IngressRouteTCPs().Informer().AddEventHandler(eventHandler)
@@ -180,18 +190,21 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
 		factoryCrd.Traefik().V1alpha1().TraefikServices().Informer().AddEventHandler(eventHandler)
 
 		factoryKube := informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns))
 		factoryKube.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
 		factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
 		factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
-		factoryKube.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
+
+		factorySecret := informers.NewSharedInformerFactoryWithOptions(c.csKube, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm))
+		factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
 
 		c.factoriesCrd[ns] = factoryCrd
 		c.factoriesKube[ns] = factoryKube
+		c.factoriesSecret[ns] = factorySecret
 	}
 
 	for _, ns := range namespaces {
 		c.factoriesCrd[ns].Start(stopCh)
 		c.factoriesKube[ns].Start(stopCh)
+		c.factoriesSecret[ns].Start(stopCh)
 	}
 
 	for _, ns := range namespaces {
@@ -206,6 +219,12 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
 				return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
 			}
 		}
+
+		for t, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) {
+			if !ok {
+				return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", t.String(), ns)
+			}
+		}
 	}
 
 	return eventCh, nil
@@ -215,7 +234,7 @@ func (c *clientWrapper) GetIngressRoutes() []*v1alpha1.IngressRoute {
 	var result []*v1alpha1.IngressRoute
 
 	for ns, factory := range c.factoriesCrd {
-		ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(c.labelSelector)
+		ings, err := factory.Traefik().V1alpha1().IngressRoutes().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list ingress routes in namespace %s: %v", ns, err)
 		}

@@ -229,7 +248,7 @@ func (c *clientWrapper) GetIngressRouteTCPs() []*v1alpha1.IngressRouteTCP {
 	var result []*v1alpha1.IngressRouteTCP
 
 	for ns, factory := range c.factoriesCrd {
-		ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(c.labelSelector)
+		ings, err := factory.Traefik().V1alpha1().IngressRouteTCPs().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list tcp ingress routes in namespace %s: %v", ns, err)
 		}

@@ -243,7 +262,7 @@ func (c *clientWrapper) GetIngressRouteUDPs() []*v1alpha1.IngressRouteUDP {
 	var result []*v1alpha1.IngressRouteUDP
 
 	for ns, factory := range c.factoriesCrd {
-		ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(c.labelSelector)
+		ings, err := factory.Traefik().V1alpha1().IngressRouteUDPs().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list udp ingress routes in namespace %s: %v", ns, err)
 		}

@@ -257,7 +276,7 @@ func (c *clientWrapper) GetMiddlewares() []*v1alpha1.Middleware {
 	var result []*v1alpha1.Middleware
 
 	for ns, factory := range c.factoriesCrd {
-		middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(c.labelSelector)
+		middlewares, err := factory.Traefik().V1alpha1().Middlewares().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list middlewares in namespace %s: %v", ns, err)
 		}

@@ -283,7 +302,7 @@ func (c *clientWrapper) GetTraefikServices() []*v1alpha1.TraefikService {
 	var result []*v1alpha1.TraefikService
 
 	for ns, factory := range c.factoriesCrd {
-		ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(c.labelSelector)
+		ings, err := factory.Traefik().V1alpha1().TraefikServices().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list Traefik services in namespace %s: %v", ns, err)
 		}

@@ -298,7 +317,7 @@ func (c *clientWrapper) GetServersTransports() []*v1alpha1.ServersTransport {
 	var result []*v1alpha1.ServersTransport
 
 	for ns, factory := range c.factoriesCrd {
-		serversTransports, err := factory.Traefik().V1alpha1().ServersTransports().Lister().List(c.labelSelector)
+		serversTransports, err := factory.Traefik().V1alpha1().ServersTransports().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list servers transport in namespace %s: %v", ns, err)
 		}

@@ -313,7 +332,7 @@ func (c *clientWrapper) GetTLSOptions() []*v1alpha1.TLSOption {
 	var result []*v1alpha1.TLSOption
 
 	for ns, factory := range c.factoriesCrd {
-		options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(c.labelSelector)
+		options, err := factory.Traefik().V1alpha1().TLSOptions().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list tls options in namespace %s: %v", ns, err)
 		}

@@ -328,7 +347,7 @@ func (c *clientWrapper) GetTLSStores() []*v1alpha1.TLSStore {
 	var result []*v1alpha1.TLSStore
 
 	for ns, factory := range c.factoriesCrd {
-		stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(c.labelSelector)
+		stores, err := factory.Traefik().V1alpha1().TLSStores().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list tls stores in namespace %s: %v", ns, err)
 		}
@@ -366,7 +385,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool,
 		return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name)
 	}
 
-	secret, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
+	secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
 	exist, err := translateNotFoundError(err)
 	return secret, exist, err
 }
@@ -384,31 +403,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string {
 	return ns
 }
 
-func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
-	return &cache.FilteringResourceEventHandler{
-		FilterFunc: func(obj interface{}) bool {
-			// Ignore Ingresses that do not match our custom label selector.
-			switch v := obj.(type) {
-			case *v1alpha1.IngressRoute:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			case *v1alpha1.IngressRouteTCP:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			case *v1alpha1.TraefikService:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			case *v1alpha1.TLSOption:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			case *v1alpha1.TLSStore:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			case *v1alpha1.Middleware:
-				return c.labelSelector.Matches(labels.Set(v.GetLabels()))
-			default:
-				return true
-			}
-		},
-		Handler: &resourceEventHandler{ev: events},
-	}
-}
-
 // eventHandlerFunc will pass the obj on to the events channel or drop it.
 // This is so passing the events along won't block in the case of high volume.
 // The events are only used for signaling anyway so dropping a few is ok.
pkg/provider/kubernetes/crd/client_test.go (new file, 65 additions)
@@ -0,0 +1,65 @@
+package crd
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	crdfake "github.com/traefik/traefik/v2/pkg/provider/kubernetes/crd/generated/clientset/versioned/fake"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kubefake "k8s.io/client-go/kubernetes/fake"
+)
+
+func TestClientIgnoresHelmOwnedSecrets(t *testing.T) {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "secret",
+		},
+	}
+	helmSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "helm-secret",
+			Labels: map[string]string{
+				"owner": "helm",
+			},
+		},
+	}
+
+	kubeClient := kubefake.NewSimpleClientset(helmSecret, secret)
+	crdClient := crdfake.NewSimpleClientset()
+
+	client := newClientImpl(kubeClient, crdClient)
+
+	stopCh := make(chan struct{})
+
+	eventCh, err := client.WatchAll(nil, stopCh)
+	require.NoError(t, err)
+
+	select {
+	case event := <-eventCh:
+		secret, ok := event.(*corev1.Secret)
+		require.True(t, ok)
+
+		assert.NotEqual(t, "helm-secret", secret.Name)
+	case <-time.After(50 * time.Millisecond):
+		assert.Fail(t, "expected to receive event for secret")
+	}
+
+	select {
+	case <-eventCh:
+		assert.Fail(t, "received more than one event")
+	case <-time.After(50 * time.Millisecond):
+	}
+
+	_, found, err := client.GetSecret("default", "secret")
+	require.NoError(t, err)
+	assert.True(t, found)
+
+	_, found, err = client.GetSecret("default", "helm-secret")
+	require.NoError(t, err)
+	assert.False(t, found)
+}
@@ -49,12 +49,12 @@ type Provider struct {
 	lastConfiguration safe.Safe
 }
 
-func (p *Provider) newK8sClient(ctx context.Context, labelSelector string) (*clientWrapper, error) {
-	labelSel, err := labels.Parse(labelSelector)
+func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
+	_, err := labels.Parse(p.LabelSelector)
 	if err != nil {
-		return nil, fmt.Errorf("invalid label selector: %q", labelSelector)
+		return nil, fmt.Errorf("invalid label selector: %q", p.LabelSelector)
 	}
-	log.FromContext(ctx).Infof("label selector is: %q", labelSel)
+	log.FromContext(ctx).Infof("label selector is: %q", p.LabelSelector)
 
 	withEndpoint := ""
 	if p.Endpoint != "" {
@@ -74,11 +74,12 @@ func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
 		client, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
 	}
 
-	if err == nil {
-		client.labelSelector = labelSel
+	if err != nil {
+		return nil, err
 	}
 
-	return client, err
+	client.labelSelector = p.LabelSelector
+	return client, nil
 }
 
 // Init the provider.
@@ -92,8 +93,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 	ctxLog := log.With(context.Background(), log.Str(log.ProviderName, providerName))
 	logger := log.FromContext(ctxLog)
 
-	logger.Debugf("Using label selector: %q", p.LabelSelector)
-	k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
+	k8sClient, err := p.newK8sClient(ctxLog)
 	if err != nil {
 		return err
 	}
@@ -22,7 +22,6 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/clientcmd"
 )
@@ -66,10 +65,12 @@ type Client interface {
 }
 
 type clientWrapper struct {
-	clientset *kubernetes.Clientset
-	factories map[string]informers.SharedInformerFactory
+	clientset            kubernetes.Interface
+	factoriesKube        map[string]informers.SharedInformerFactory
+	factoriesSecret      map[string]informers.SharedInformerFactory
+	factoriesIngress     map[string]informers.SharedInformerFactory
 	clusterFactory       informers.SharedInformerFactory
-	ingressLabelSelector labels.Selector
+	ingressLabelSelector string
 	isNamespaceAll       bool
 	watchedNamespaces    []string
 }
@@ -138,17 +139,19 @@ func createClientFromConfig(c *rest.Config) (*clientWrapper, error) {
 	return newClientImpl(clientset), nil
 }
 
-func newClientImpl(clientset *kubernetes.Clientset) *clientWrapper {
+func newClientImpl(clientset kubernetes.Interface) *clientWrapper {
 	return &clientWrapper{
-		clientset: clientset,
-		factories: make(map[string]informers.SharedInformerFactory),
+		clientset:        clientset,
+		factoriesSecret:  make(map[string]informers.SharedInformerFactory),
+		factoriesIngress: make(map[string]informers.SharedInformerFactory),
+		factoriesKube:    make(map[string]informers.SharedInformerFactory),
 	}
 }
 
 // WatchAll starts namespace-specific controllers for all relevant kinds.
 func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<-chan interface{}, error) {
 	eventCh := make(chan interface{}, 1)
-	eventHandler := c.newResourceEventHandler(eventCh)
+	eventHandler := &resourceEventHandler{eventCh}
 
 	if len(namespaces) == 0 {
 		namespaces = []string{metav1.NamespaceAll}
@@ -157,21 +160,49 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
 
 	c.watchedNamespaces = namespaces
 
+	notOwnedByHelm := func(opts *metav1.ListOptions) {
+		opts.LabelSelector = "owner!=helm"
+	}
+
+	matchesLabelSelector := func(opts *metav1.ListOptions) {
+		opts.LabelSelector = c.ingressLabelSelector
+	}
+
 	for _, ns := range namespaces {
-		factory := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns))
-		factory.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
-		factory.Core().V1().Services().Informer().AddEventHandler(eventHandler)
-		factory.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
-		factory.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
-		c.factories[ns] = factory
+		factoryIngress := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(matchesLabelSelector))
+		factoryIngress.Extensions().V1beta1().Ingresses().Informer().AddEventHandler(eventHandler)
+		c.factoriesIngress[ns] = factoryIngress
+
+		factoryKube := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns))
+		factoryKube.Core().V1().Services().Informer().AddEventHandler(eventHandler)
+		factoryKube.Core().V1().Endpoints().Informer().AddEventHandler(eventHandler)
+		c.factoriesKube[ns] = factoryKube
+
+		factorySecret := informers.NewSharedInformerFactoryWithOptions(c.clientset, resyncPeriod, informers.WithNamespace(ns), informers.WithTweakListOptions(notOwnedByHelm))
+		factorySecret.Core().V1().Secrets().Informer().AddEventHandler(eventHandler)
+		c.factoriesSecret[ns] = factorySecret
 	}
 
 	for _, ns := range namespaces {
-		c.factories[ns].Start(stopCh)
+		c.factoriesIngress[ns].Start(stopCh)
+		c.factoriesKube[ns].Start(stopCh)
+		c.factoriesSecret[ns].Start(stopCh)
 	}
 
 	for _, ns := range namespaces {
-		for typ, ok := range c.factories[ns].WaitForCacheSync(stopCh) {
+		for typ, ok := range c.factoriesIngress[ns].WaitForCacheSync(stopCh) {
+			if !ok {
+				return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
+			}
+		}
+
+		for typ, ok := range c.factoriesKube[ns].WaitForCacheSync(stopCh) {
+			if !ok {
+				return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
+			}
+		}
+
+		for typ, ok := range c.factoriesSecret[ns].WaitForCacheSync(stopCh) {
 			if !ok {
 				return nil, fmt.Errorf("timed out waiting for controller caches to sync %s in namespace %q", typ, ns)
 			}
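The dedicated secret factory above filters server-side with the selector `owner!=helm`, which excludes the Secrets Helm 3 uses to store release payloads. A standalone sketch of the same mechanism against a fake clientset (illustrative object names; this mirrors, but is separate from, the client_test.go added in this commit):

```go
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset(
		&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "app-secret"}},
		&corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "helm-release",
			Labels: map[string]string{"owner": "helm"}}},
	)

	// The same server-side filter the commit applies to the secret factory.
	notOwnedByHelm := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "owner!=helm"
	}

	factory := informers.NewSharedInformerFactoryWithOptions(client, 10*time.Minute,
		informers.WithTweakListOptions(notOwnedByHelm))
	informer := factory.Core().V1().Secrets().Informer()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)

	// Only "app-secret" reaches the cache; the Helm release secret never does.
	for _, obj := range informer.GetStore().List() {
		fmt.Println(obj.(*corev1.Secret).Name)
	}
}
```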
@@ -203,9 +234,9 @@ func (c *clientWrapper) WatchAll(namespaces []string, stopCh <-chan struct{}) (<
 func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress {
 	var results []*networkingv1beta1.Ingress

-	for ns, factory := range c.factories {
+	for ns, factory := range c.factoriesIngress {
 		// extensions
-		ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector)
+		ings, err := factory.Extensions().V1beta1().Ingresses().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
 		}

@@ -220,7 +251,7 @@ func (c *clientWrapper) GetIngresses() []*networkingv1beta1.Ingress {
 		}

 		// networking
-		list, err := factory.Networking().V1beta1().Ingresses().Lister().List(c.ingressLabelSelector)
+		list, err := factory.Networking().V1beta1().Ingresses().Lister().List(labels.Everything())
 		if err != nil {
 			log.Errorf("Failed to list ingresses in namespace %s: %v", ns, err)
 		}
@@ -254,7 +285,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ingS
 		return c.updateIngressStatusOld(src, ingStatus)
 	}

-	ing, err := c.factories[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
+	ing, err := c.factoriesIngress[c.lookupNamespace(src.Namespace)].Networking().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
 	if err != nil {
 		return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
 	}

@@ -282,7 +313,7 @@ func (c *clientWrapper) UpdateIngressStatus(src *networkingv1beta1.Ingress, ingS
 }

 func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, ingStatus []corev1.LoadBalancerIngress) error {
-	ing, err := c.factories[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
+	ing, err := c.factoriesIngress[c.lookupNamespace(src.Namespace)].Extensions().V1beta1().Ingresses().Lister().Ingresses(src.Namespace).Get(src.Name)
 	if err != nil {
 		return fmt.Errorf("failed to get ingress %s/%s: %w", src.Namespace, src.Name, err)
 	}

@@ -335,7 +366,7 @@ func (c *clientWrapper) GetService(namespace, name string) (*corev1.Service, boo
 		return nil, false, fmt.Errorf("failed to get service %s/%s: namespace is not within watched namespaces", namespace, name)
 	}

-	service, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
+	service, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Services().Lister().Services(namespace).Get(name)
 	exist, err := translateNotFoundError(err)
 	return service, exist, err
 }

@@ -346,7 +377,7 @@ func (c *clientWrapper) GetEndpoints(namespace, name string) (*corev1.Endpoints,
 		return nil, false, fmt.Errorf("failed to get endpoints %s/%s: namespace is not within watched namespaces", namespace, name)
 	}

-	endpoint, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name)
+	endpoint, err := c.factoriesKube[c.lookupNamespace(namespace)].Core().V1().Endpoints().Lister().Endpoints(namespace).Get(name)
 	exist, err := translateNotFoundError(err)
 	return endpoint, exist, err
 }

@@ -357,7 +388,7 @@ func (c *clientWrapper) GetSecret(namespace, name string) (*corev1.Secret, bool,
 		return nil, false, fmt.Errorf("failed to get secret %s/%s: namespace is not within watched namespaces", namespace, name)
 	}

-	secret, err := c.factories[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
+	secret, err := c.factoriesSecret[c.lookupNamespace(namespace)].Core().V1().Secrets().Lister().Secrets(namespace).Get(name)
 	exist, err := translateNotFoundError(err)
 	return secret, exist, err
 }
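The `translateNotFoundError` helper these getters rely on sits outside the hunks shown here. Judging from its call sites and from `TestTranslateNotFoundError` in the test changes below, a plausible sketch (not the verbatim source) is:

```go
package ingress

import (
	kubeerror "k8s.io/apimachinery/pkg/api/errors"
)

// Sketch, not verbatim source: fold Kubernetes NotFound errors into a
// (found=false, err=nil) pair so callers can treat absence as a normal,
// non-error outcome.
func translateNotFoundError(err error) (bool, error) {
	if kubeerror.IsNotFound(err) {
		return false, nil
	}
	return err == nil, err
}
```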
@@ -394,25 +425,6 @@ func (c *clientWrapper) lookupNamespace(ns string) string {
 	return ns
 }

-func (c *clientWrapper) newResourceEventHandler(events chan<- interface{}) cache.ResourceEventHandler {
-	return &cache.FilteringResourceEventHandler{
-		FilterFunc: func(obj interface{}) bool {
-			// Ignore Ingresses that do not match our custom label selector.
-			switch v := obj.(type) {
-			case *extensionsv1beta1.Ingress:
-				lbls := labels.Set(v.GetLabels())
-				return c.ingressLabelSelector.Matches(lbls)
-			case *networkingv1beta1.Ingress:
-				lbls := labels.Set(v.GetLabels())
-				return c.ingressLabelSelector.Matches(lbls)
-			default:
-				return true
-			}
-		},
-		Handler: &resourceEventHandler{ev: events},
-	}
-}
-
 // GetServerVersion returns the cluster server version, or an error.
 func (c *clientWrapper) GetServerVersion() (*version.Version, error) {
 	serverVersion, err := c.clientset.Discovery().ServerVersion()
@@ -3,11 +3,15 @@ package ingress
 import (
 	"fmt"
 	"testing"
+	"time"

 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	kubeerror "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	kubefake "k8s.io/client-go/kubernetes/fake"
 )

 func TestTranslateNotFoundError(t *testing.T) {
@@ -125,3 +129,54 @@ func TestIsLoadBalancerIngressEquals(t *testing.T) {
 		})
 	}
 }
+
+func TestClientIgnoresHelmOwnedSecrets(t *testing.T) {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "secret",
+		},
+	}
+	helmSecret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "helm-secret",
+			Labels: map[string]string{
+				"owner": "helm",
+			},
+		},
+	}
+
+	kubeClient := kubefake.NewSimpleClientset(helmSecret, secret)
+
+	client := newClientImpl(kubeClient)
+
+	stopCh := make(chan struct{})
+
+	eventCh, err := client.WatchAll(nil, stopCh)
+	require.NoError(t, err)
+
+	select {
+	case event := <-eventCh:
+		secret, ok := event.(*corev1.Secret)
+		require.True(t, ok)
+
+		assert.NotEqual(t, "helm-secret", secret.Name)
+	case <-time.After(50 * time.Millisecond):
+		assert.Fail(t, "expected to receive event for secret")
+	}
+
+	select {
+	case <-eventCh:
+		assert.Fail(t, "received more than one event")
+	case <-time.After(50 * time.Millisecond):
+	}
+
+	_, found, err := client.GetSecret("default", "secret")
+	require.NoError(t, err)
+	assert.True(t, found)
+
+	_, found, err = client.GetSecret("default", "helm-secret")
+	require.NoError(t, err)
+	assert.False(t, found)
+}
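One subtlety this test depends on: a `!=` requirement in a Kubernetes label selector also matches objects that do not carry the key at all, which is why the unlabeled `secret` still flows through while `helm-secret` is dropped. A quick standalone demonstration with the same selector string (not part of the patch):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	sel, err := labels.Parse("owner!=helm")
	if err != nil {
		panic(err)
	}

	fmt.Println(sel.Matches(labels.Set{"owner": "helm"}))  // false: excluded
	fmt.Println(sel.Matches(labels.Set{"owner": "other"})) // true: different value
	fmt.Println(sel.Matches(labels.Set{}))                 // true: key absent also matches
}
```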
@@ -53,15 +53,15 @@ type EndpointIngress struct {
 	PublishedService string `description:"Published Kubernetes Service to copy status from." json:"publishedService,omitempty" toml:"publishedService,omitempty" yaml:"publishedService,omitempty"`
 }

-func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string) (*clientWrapper, error) {
-	ingLabelSel, err := labels.Parse(ingressLabelSelector)
+func (p *Provider) newK8sClient(ctx context.Context) (*clientWrapper, error) {
+	_, err := labels.Parse(p.LabelSelector)
 	if err != nil {
-		return nil, fmt.Errorf("invalid ingress label selector: %q", ingressLabelSelector)
+		return nil, fmt.Errorf("invalid ingress label selector: %q", p.LabelSelector)
 	}

 	logger := log.FromContext(ctx)

-	logger.Infof("ingress label selector is: %q", ingLabelSel)
+	logger.Infof("ingress label selector is: %q", p.LabelSelector)

 	withEndpoint := ""
 	if p.Endpoint != "" {

@@ -81,11 +81,12 @@ func (p *Provider) newK8sClient(ctx context.Context, ingressLabelSelector string
 		cl, err = newExternalClusterClient(p.Endpoint, p.Token, p.CertAuthFilePath)
 	}

-	if err == nil {
-		cl.ingressLabelSelector = ingLabelSel
+	if err != nil {
+		return nil, err
 	}

-	return cl, err
+	cl.ingressLabelSelector = p.LabelSelector
+	return cl, nil
 }

 // Init the provider.

@@ -99,8 +100,7 @@ func (p *Provider) Provide(configurationChan chan<- dynamic.Message, pool *safe.
 	ctxLog := log.With(context.Background(), log.Str(log.ProviderName, "kubernetes"))
 	logger := log.FromContext(ctxLog)

-	logger.Debugf("Using Ingress label selector: %q", p.LabelSelector)
-	k8sClient, err := p.newK8sClient(ctxLog, p.LabelSelector)
+	k8sClient, err := p.newK8sClient(ctxLog)
 	if err != nil {
 		return err
 	}
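Note the pattern here: `labels.Parse` is now used purely for validation, the parsed selector is discarded, and the raw string is stored on the client so it can travel to the API server verbatim in ListOptions. A standalone sketch of that up-front validation (the selector strings are illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	for _, sel := range []string{"app=traefik,env!=dev", "owner!=helm", "!!not a selector"} {
		// Parse is used only to reject malformed selectors early; the raw
		// string is what ultimately travels to the API server.
		if _, err := labels.Parse(sel); err != nil {
			fmt.Printf("invalid ingress label selector %q: %v\n", sel, err)
			continue
		}
		fmt.Printf("valid selector: %q\n", sel)
	}
}
```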
@@ -13,9 +13,11 @@ import (

 // Proxy forwards a TCP request to a TCP service.
 type Proxy struct {
+	address          string
 	target           *net.TCPAddr
 	terminationDelay time.Duration
 	proxyProtocol    *dynamic.ProxyProtocol
+	refreshTarget    bool
 }

 // NewProxy creates a new Proxy.

@@ -29,7 +31,19 @@ func NewProxy(address string, terminationDelay time.Duration, proxyProtocol *dyn
 		return nil, fmt.Errorf("unknown proxyProtocol version: %d", proxyProtocol.Version)
 	}

-	return &Proxy{target: tcpAddr, terminationDelay: terminationDelay, proxyProtocol: proxyProtocol}, nil
+	// enable the refresh of the target only if the address is not an IP
+	refreshTarget := false
+	if host, _, err := net.SplitHostPort(address); err == nil && net.ParseIP(host) == nil {
+		refreshTarget = true
+	}
+
+	return &Proxy{
+		address:          address,
+		target:           tcpAddr,
+		refreshTarget:    refreshTarget,
+		terminationDelay: terminationDelay,
+		proxyProtocol:    proxyProtocol,
+	}, nil
 }

 // ServeTCP forwards the connection to a service.
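The `refreshTarget` decision boils down to two standard-library calls: split off the port, then see whether the host part parses as an IP literal. A standalone sketch of the same check (the addresses are illustrative):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	for _, addr := range []string{"8.8.4.4:53", "dns.google:53", "[::1]:8080"} {
		host, _, err := net.SplitHostPort(addr)
		if err != nil {
			fmt.Printf("%s: %v\n", addr, err)
			continue
		}
		// ParseIP returns nil for anything that is not an IP literal, i.e.
		// a DNS name, and only those targets need periodic re-resolution.
		refresh := net.ParseIP(host) == nil
		fmt.Printf("%-15s refreshTarget=%v\n", addr, refresh)
	}
}
```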
@@ -39,6 +53,15 @@ func (p *Proxy) ServeTCP(conn WriteCloser) {
 	// needed because of e.g. server.trackedConnection
 	defer conn.Close()

+	if p.refreshTarget {
+		tcpAddr, err := net.ResolveTCPAddr("tcp", p.address)
+		if err != nil {
+			log.Errorf("Error resolving tcp address: %v", err)
+			return
+		}
+		p.target = tcpAddr
+	}
+
 	connBackend, err := net.DialTCP("tcp", nil, p.target)
 	if err != nil {
 		log.Errorf("Error while connection to backend: %v", err)
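With `refreshTarget` set, every incoming connection triggers a fresh `net.ResolveTCPAddr`, so a DNS record that now points at a different backend (say, a rescheduled pod) takes effect on the very next connection instead of requiring a restart. A standalone sketch (needs network access; the hostname is illustrative):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Resolving anew before each dial is the essence of the change above:
	// the answer may legitimately differ between iterations if DNS changed.
	for i := 0; i < 2; i++ {
		tcpAddr, err := net.ResolveTCPAddr("tcp", "dns.google:53")
		if err != nil {
			fmt.Println("Error resolving tcp address:", err)
			return
		}
		fmt.Println("resolved to:", tcpAddr)
	}
}
```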
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"io"
 	"net"
+	"sync"
 	"testing"
 	"time"

@@ -169,3 +170,74 @@ func TestProxyProtocol(t *testing.T) {
 		})
 	}
 }
+
+func TestLookupAddress(t *testing.T) {
+	testCases := []struct {
+		desc       string
+		address    string
+		expectSame assert.ComparisonAssertionFunc
+	}{
+		{
+			desc:       "IP doesn't need refresh",
+			address:    "8.8.4.4:53",
+			expectSame: assert.Same,
+		},
+		{
+			desc:       "Hostname needs refresh",
+			address:    "dns.google:53",
+			expectSame: assert.NotSame,
+		},
+	}
+
+	for _, test := range testCases {
+		test := test
+		t.Run(test.desc, func(t *testing.T) {
+			t.Parallel()
+
+			proxy, err := NewProxy(test.address, 10*time.Millisecond, nil)
+			require.NoError(t, err)
+
+			require.NotNil(t, proxy.target)
+
+			proxyListener, err := net.Listen("tcp", ":0")
+			require.NoError(t, err)
+
+			var wg sync.WaitGroup
+			go func(wg *sync.WaitGroup) {
+				for {
+					conn, err := proxyListener.Accept()
+					require.NoError(t, err)
+
+					proxy.ServeTCP(conn.(*net.TCPConn))
+
+					wg.Done()
+				}
+			}(&wg)
+
+			var lastTarget *net.TCPAddr
+
+			for i := 0; i < 3; i++ {
+				wg.Add(1)
+
+				conn, err := net.Dial("tcp", proxyListener.Addr().String())
+				require.NoError(t, err)
+
+				_, err = conn.Write([]byte("ping\n"))
+				require.NoError(t, err)
+
+				err = conn.Close()
+				require.NoError(t, err)
+
+				wg.Wait()
+
+				assert.NotNil(t, proxy.target)
+
+				if lastTarget != nil {
+					test.expectSame(t, lastTarget, proxy.target)
+				}
+
+				lastTarget = proxy.target
+			}
+		})
+	}
+}
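The test distinguishes the two behaviours by pointer identity rather than value equality: an IP target is resolved once in `NewProxy` and the same `*net.TCPAddr` is reused (`assert.Same`), while a hostname target is swapped for a freshly allocated address on each connection (`assert.NotSame`). A standalone illustration of why identity is the right probe:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Two lookups of the same address yield equal values but distinct
	// allocations, which is exactly what assert.Same / assert.NotSame probe.
	a, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:80")
	b, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:80")

	fmt.Println(a == b)                   // false: different pointers
	fmt.Println(a.String() == b.String()) // true: same resolved value
}
```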
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 set -e

 if ! test -e autogen/genstatic/gen.go; then
@@ -4,11 +4,11 @@ RepositoryName = "traefik"
 OutputType = "file"
 FileName = "traefik_changelog.md"

-# example new bugfix v2.3.2
+# example new bugfix v2.3.3
 CurrentRef = "v2.3"
-PreviousRef = "v2.3.1"
+PreviousRef = "v2.3.2"
 BaseBranch = "v2.3"
-FutureCurrentRefName = "v2.3.2"
+FutureCurrentRefName = "v2.3.3"

 ThresholdPreviousRef = 10
 ThresholdCurrentRef = 10
@@ -1,4 +1,5 @@
-#!/bin/bash -e
+#!/usr/bin/env bash
+set -e

 HACK_DIR="$( cd "$( dirname "${0}" )" && pwd -P)"; export HACK_DIR
 REPO_ROOT=${HACK_DIR}/..