Compare commits

...

30 commits

Author SHA1 Message Date
fa154de4a2
Merge branch 'v3.0' of github.com:traefik/traefik
Signed-off-by: baalajimaestro <me@baalajimaestro.me>
2024-01-14 15:44:08 +05:30
Suyash Choudhary
980dac4572
Support file path as input param for Kubernetes token value 2024-01-11 17:06:06 +01:00
Michael
ff7966f9cd
feat: re introduce IpWhitelist middleware as deprecated 2024-01-11 10:40:06 +01:00
mmatur
3bbc560283
Merge v2.11 into v3.0 2024-01-10 14:08:30 +01:00
8cb5e0ac30
Merge branch 'v3.0' of github.com:traefik/traefik
Signed-off-by: baalajimaestro <me@baalajimaestro.me>
2024-01-10 10:54:52 +05:30
Jeremy Fleischman
ccf3a9995a
Add rejectStatusCode option to IPAllowList middleware 2024-01-09 20:26:05 +01:00
Michael
e522446909
Improve integration tests
Co-authored-by: Julien Salleyron <julien.salleyron@gmail.com>
2024-01-09 17:00:07 +01:00
Michael
fea94a3393
feat: upgrade gateway api to v1.0.0 2024-01-09 10:28:05 +01:00
Jesse Haka
4ddef9830b
Migrate to opentelemetry 2024-01-08 09:10:06 +01:00
Romain
45bb00be04
Improve migration guide
Co-authored-by: Baptiste Mayelle <baptiste.mayelle@traefik.io>
2024-01-05 16:04:05 +01:00
Ludovic Fernandez
cd8d5b8f10
chore: update github.com/docker/docker to v24.0.7 2024-01-05 15:10:05 +01:00
Baptiste Mayelle
eff294829f
Add missing TCP IPAllowList middleware constructor
Co-authored-by: Romain <rtribotte@users.noreply.github.com>
2024-01-04 14:58:05 +01:00
mmatur
a69c1ba3b7
Merge branch v2.11 into v3.0 2024-01-03 17:28:22 +01:00
Michael
9adf0fb638
Prepare release v2.11.0-rc1 2024-01-03 11:12:05 +01:00
Julien Salleyron
56e2110dc5
Fix readHeaderTimeout in proxyproto 2024-01-02 22:02:05 +01:00
Ludovic Fernandez
5be13802dc
chore: update github.com/fsnotify/fsnotify to v1.7.0 2024-01-02 20:58:06 +01:00
Ludovic Fernandez
7345afd8b6
Update quic-go to v0.40.1 2024-01-02 20:36:06 +01:00
Romain
a84d5c0ef1
Adjust deprecation notice for Kubernetes CRD provider
Co-authored-by: Baptiste Mayelle <baptiste.mayelle@traefik.io>
2024-01-02 20:04:06 +01:00
youpsla
2a9471d278
docs: fix typo 2024-01-02 19:12:06 +01:00
Domenico Andreoli
0042562678
docs: fix the explanation of the TLS challenge 2024-01-02 18:46:05 +01:00
Ari Yonaty
74ab88d47e
docs: fix description for anonymous usage statistics references 2024-01-02 18:20:06 +01:00
sven
6df9578ace
Update wording of compose example 2024-01-02 17:56:06 +01:00
sven
cd7d324295
Documentation enhancements 2024-01-02 17:30:06 +01:00
Landry Benguigui
0e92b02474
Deprecate IPWhiteList middleware in favor of IPAllowList
Co-authored-by: Kevin Pollet <pollet.kevin@gmail.com>
2024-01-02 17:04:06 +01:00
Julien Salleyron
9662cdca64
Add KeepAliveMaxTime and KeepAliveMaxRequests features to entrypoints 2024-01-02 16:40:06 +01:00
Baptiste Mayelle
3dfaa3d5fa
Add Redis Sentinel support
Co-authored-by: Romain <rtribotte@users.noreply.github.com>
2024-01-02 16:16:05 +01:00
Baptiste Mayelle
60123a8f3f
Hash WRR sticky cookies
Co-authored-by: Romain <rtribotte@users.noreply.github.com>
2024-01-02 15:52:05 +01:00
Michael
2a7b2ef772
chore: happy new year 2024 2024-01-02 15:28:09 +01:00
Ludovic Fernandez
0a79643001
Prepare release v2.10.7 2023-12-06 16:42:09 +01:00
Suyash Choudhary
e77a66c2ac
Fixed datadog logs json format issue 2023-12-06 14:36:05 +01:00
410 changed files with 23492 additions and 13853 deletions

View file

@ -144,11 +144,11 @@ linters-settings:
gomoddirectives:
replace-allow-list:
- github.com/abbot/go-http-auth
- github.com/go-check/check
- github.com/gorilla/mux
- github.com/mailgun/minheap
- github.com/mailgun/multibuf
- github.com/jaguilar/vt100
- github.com/cucumber/godog
testifylint:
enable:
- bool-compare
@ -159,7 +159,6 @@ linters-settings:
- expected-actual
- float-compare
- len
- suite-dont-use-pkg
- suite-extra-assert-call
- suite-thelper
@ -253,6 +252,12 @@ issues:
text: 'SA1019: config.ClientCAs.Subjects has been deprecated since Go 1.18'
- path: pkg/types/tls_test.go
text: 'SA1019: tlsConfig.RootCAs.Subjects has been deprecated since Go 1.18'
- path: pkg/provider/kubernetes/crd/kubernetes.go
text: 'SA1019: middleware.Spec.IPWhiteList is deprecated: please use IPAllowList instead.'
- path: pkg/server/middleware/tcp/middlewares.go
text: 'SA1019: config.IPWhiteList is deprecated: please use IPAllowList instead.'
- path: pkg/server/middleware/middlewares.go
text: 'SA1019: config.IPWhiteList is deprecated: please use IPAllowList instead.'
- path: pkg/provider/kubernetes/(crd|gateway)/client.go
linters:
- interfacebloat

View file

@ -31,24 +31,6 @@ global_job_config:
- cache restore traefik-$(checksum go.sum)
blocks:
- name: Test Integration
dependencies: []
run:
when: "branch =~ '.*' OR pull_request =~'.*'"
task:
jobs:
- name: Test Integration
commands:
- make pull-images
- touch webui/static/index.html # Avoid generating webui
- IN_DOCKER="" make binary
- make test-integration
- df -h
epilogue:
always:
commands:
- cache store traefik-$(checksum go.sum) $HOME/go/pkg/mod
- name: Release
dependencies: []
run:
@ -65,8 +47,6 @@ blocks:
value: 2.32.1
- name: CODENAME
value: "beaufort"
- name: IN_DOCKER
value: ""
prologue:
commands:
- export VERSION=${SEMAPHORE_GIT_TAG_NAME}

View file

@ -1,3 +1,31 @@
## [v2.11.0-rc1](https://github.com/traefik/traefik/tree/v2.11.0-rc1) (2024-01-02)
[All Commits](https://github.com/traefik/traefik/compare/0a7964300166d167f68d5502bc245b3b9c8842b4...v2.11.0-rc1)
**Enhancements:**
- **[middleware]** Deprecate IPWhiteList middleware in favor of IPAllowList ([#10249](https://github.com/traefik/traefik/pull/10249) by [lbenguigui](https://github.com/lbenguigui))
- **[redis]** Add Redis Sentinel support ([#10245](https://github.com/traefik/traefik/pull/10245) by [youkoulayley](https://github.com/youkoulayley))
- **[server]** Add KeepAliveMaxTime and KeepAliveMaxRequests features to entrypoints ([#10247](https://github.com/traefik/traefik/pull/10247) by [juliens](https://github.com/juliens))
- **[sticky-session]** Hash WRR sticky cookies ([#10243](https://github.com/traefik/traefik/pull/10243) by [youkoulayley](https://github.com/youkoulayley))
**Bug fixes:**
- **[file]** Update github.com/fsnotify/fsnotify to v1.7.0 ([#10313](https://github.com/traefik/traefik/pull/10313) by [ldez](https://github.com/ldez))
- **[http3]** Update quic-go to v0.40.1 ([#10296](https://github.com/traefik/traefik/pull/10296) by [ldez](https://github.com/ldez))
- **[server]** Fix ReadHeaderTimeout for PROXY protocol ([#10320](https://github.com/traefik/traefik/pull/10320) by [juliens](https://github.com/juliens))
**Documentation:**
- **[acme]** Fix TLS challenge explanation ([#10293](https://github.com/traefik/traefik/pull/10293) by [cavokz](https://github.com/cavokz))
- **[docker,acme]** Fix typo ([#10294](https://github.com/traefik/traefik/pull/10294) by [youpsla](https://github.com/youpsla))
- **[docker]** Update wording of compose example ([#10276](https://github.com/traefik/traefik/pull/10276) by [svx](https://github.com/svx))
- **[k8s/crd]** Adjust deprecation notice for Kubernetes CRD provider ([#10317](https://github.com/traefik/traefik/pull/10317) by [rtribotte](https://github.com/rtribotte))
- Fix description for anonymous usage statistics references ([#10287](https://github.com/traefik/traefik/pull/10287) by [ariyonaty](https://github.com/ariyonaty))
- Documentation enhancements ([#10261](https://github.com/traefik/traefik/pull/10261) by [svx](https://github.com/svx))
## [v2.10.7](https://github.com/traefik/traefik/tree/v2.10.7) (2023-12-06)
[All Commits](https://github.com/traefik/traefik/compare/v2.10.6...v2.10.7)
**Bug fixes:**
- **[logs]** Fixed datadog logs json format issue ([#10233](https://github.com/traefik/traefik/pull/10233) by [sssash18](https://github.com/sssash18))
## [v3.0.0-beta5](https://github.com/traefik/traefik/tree/v3.0.0-beta5) (2023-11-29)
[All Commits](https://github.com/traefik/traefik/compare/v3.0.0-beta4...v3.0.0-beta5)

View file

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2016-2020 Containous SAS; 2020-2023 Traefik Labs
Copyright (c) 2016-2020 Containous SAS; 2020-2024 Traefik Labs
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

Makefile
View file

@ -6,34 +6,6 @@ VERSION_GIT := $(if $(TAG_NAME),$(TAG_NAME),$(SHA))
VERSION := $(if $(VERSION),$(VERSION),$(VERSION_GIT))
GIT_BRANCH := $(subst heads/,,$(shell git rev-parse --abbrev-ref HEAD 2>/dev/null))
TRAEFIK_DEV_IMAGE := traefik-dev$(if $(GIT_BRANCH),:$(subst /,-,$(GIT_BRANCH)))
REPONAME := $(shell echo $(REPO) | tr '[:upper:]' '[:lower:]')
TRAEFIK_IMAGE := $(if $(REPONAME),$(REPONAME),"traefik/traefik")
INTEGRATION_OPTS := $(if $(MAKE_DOCKER_HOST),-e "DOCKER_HOST=$(MAKE_DOCKER_HOST)",-v "/var/run/docker.sock:/var/run/docker.sock")
DOCKER_BUILD_ARGS := $(if $(DOCKER_VERSION), "--build-arg=DOCKER_VERSION=$(DOCKER_VERSION)",)
# only used when running in docker
TRAEFIK_ENVS := \
-e OS_ARCH_ARG \
-e OS_PLATFORM_ARG \
-e TESTFLAGS \
-e VERBOSE \
-e VERSION \
-e CODENAME \
-e TESTDIRS \
-e CI \
-e IN_DOCKER=true # Indicator for integration tests that we are running inside a container.
TRAEFIK_MOUNT := -v "$(CURDIR)/dist:/go/src/github.com/traefik/traefik/dist"
DOCKER_RUN_OPTS := $(TRAEFIK_ENVS) $(TRAEFIK_MOUNT) "$(TRAEFIK_DEV_IMAGE)"
DOCKER_NON_INTERACTIVE ?= false
DOCKER_RUN_TRAEFIK := docker run $(INTEGRATION_OPTS) $(if $(DOCKER_NON_INTERACTIVE), , -it) $(DOCKER_RUN_OPTS)
DOCKER_RUN_TRAEFIK_TEST := docker run --add-host=host.docker.internal:127.0.0.1 --rm --name=traefik --network traefik-test-network -v $(PWD):$(PWD) -w $(PWD) $(INTEGRATION_OPTS) $(if $(DOCKER_NON_INTERACTIVE), , -it) $(DOCKER_RUN_OPTS)
DOCKER_RUN_TRAEFIK_NOTTY := docker run $(INTEGRATION_OPTS) $(if $(DOCKER_NON_INTERACTIVE), , -i) $(DOCKER_RUN_OPTS)
IN_DOCKER ?= true
.PHONY: default
default: binary
@ -42,20 +14,6 @@ default: binary
dist:
mkdir -p dist
## Build Dev Docker image
.PHONY: build-dev-image
build-dev-image: dist
ifneq ("$(IN_DOCKER)", "")
docker build $(DOCKER_BUILD_ARGS) -t "$(TRAEFIK_DEV_IMAGE)" --build-arg HOST_PWD="$(PWD)" -f build.Dockerfile .
endif
## Build Dev Docker image without cache
.PHONY: build-dev-image-no-cache
build-dev-image-no-cache: dist
ifneq ("$(IN_DOCKER)", "")
docker build $(DOCKER_BUILD_ARGS) --no-cache -t "$(TRAEFIK_DEV_IMAGE)" --build-arg HOST_PWD="$(PWD)" -f build.Dockerfile .
endif
## Build WebUI Docker image
.PHONY: build-webui-image
build-webui-image:
@ -79,8 +37,8 @@ generate-webui: webui/static/index.html
## Build the binary
.PHONY: binary
binary: generate-webui build-dev-image
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK)) ./script/make.sh generate binary
binary: generate-webui
./script/make.sh generate binary
## Build the linux binary locally
.PHONY: binary-debug
@ -89,35 +47,29 @@ binary-debug: generate-webui
## Build the binary for the standard platforms (linux, darwin, windows)
.PHONY: crossbinary-default
crossbinary-default: generate-webui build-dev-image
$(DOCKER_RUN_TRAEFIK_NOTTY) ./script/make.sh generate crossbinary-default
crossbinary-default: generate-webui
./script/make.sh generate crossbinary-default
## Build the binary for the standard platforms (linux, darwin, windows) in parallel
.PHONY: crossbinary-default-parallel
crossbinary-default-parallel:
$(MAKE) generate-webui
$(MAKE) build-dev-image crossbinary-default
$(MAKE) crossbinary-default
## Run the unit and integration tests
.PHONY: test
test: build-dev-image
-docker network create traefik-test-network --driver bridge --subnet 172.31.42.0/24
trap 'docker network rm traefik-test-network' EXIT; \
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_TEST)) ./script/make.sh generate test-unit binary test-integration
test:
./script/make.sh generate test-unit binary test-integration
## Run the unit tests
.PHONY: test-unit
test-unit: build-dev-image
-docker network create traefik-test-network --driver bridge --subnet 172.31.42.0/24
trap 'docker network rm traefik-test-network' EXIT; \
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_TEST)) ./script/make.sh generate test-unit
test-unit:
./script/make.sh generate test-unit
## Run the integration tests
.PHONY: test-integration
test-integration: build-dev-image
-docker network create traefik-test-network --driver bridge --subnet 172.31.42.0/24
trap 'docker network rm traefik-test-network' EXIT; \
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_TEST)) ./script/make.sh generate binary test-integration
test-integration:
./script/make.sh generate binary test-integration
## Pull all images for integration tests
.PHONY: pull-images
@ -128,16 +80,22 @@ pull-images:
| uniq \
| xargs -P 6 -n 1 docker pull
EXECUTABLES = misspell shellcheck
## Validate code and docs
.PHONY: validate-files
validate-files: build-dev-image
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK)) ./script/make.sh generate validate-lint validate-misspell
validate-files:
$(foreach exec,$(EXECUTABLES),\
$(if $(shell which $(exec)),,$(error "No $(exec) in PATH")))
./script/make.sh generate validate-lint validate-misspell
bash $(CURDIR)/script/validate-shell-script.sh
## Validate code, docs, and vendor
.PHONY: validate
validate: build-dev-image
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK)) ./script/make.sh generate validate-lint validate-misspell validate-vendor
validate:
$(foreach exec,$(EXECUTABLES),\
$(if $(shell which $(exec)),,$(error "No $(exec) in PATH")))
./script/make.sh generate validate-lint validate-misspell validate-vendor
bash $(CURDIR)/script/validate-shell-script.sh
## Clean up static directory and build a Docker Traefik image
@ -155,11 +113,6 @@ build-image-dirty: binary
build-image-debug: binary-debug
docker build -t $(TRAEFIK_IMAGE) -f debug.Dockerfile .
## Start a shell inside the build env
.PHONY: shell
shell: build-dev-image
$(DOCKER_RUN_TRAEFIK) /bin/bash
## Build documentation site
.PHONY: docs
docs:
@ -187,23 +140,23 @@ generate-genconf:
## Create packages for the release
.PHONY: release-packages
release-packages: generate-webui build-dev-image
release-packages: generate-webui
rm -rf dist
@- $(foreach os, linux darwin windows freebsd openbsd, \
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) goreleaser release --skip-publish -p 2 --timeout="90m" --config $(shell go run ./internal/release $(os)); \
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) go clean -cache; \
goreleaser release --skip-publish -p 2 --timeout="90m" --config $(shell go run ./internal/release $(os)); \
go clean -cache; \
)
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) cat dist/**/*_checksums.txt >> dist/traefik_${VERSION}_checksums.txt
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) rm dist/**/*_checksums.txt
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) tar cfz dist/traefik-${VERSION}.src.tar.gz \
cat dist/**/*_checksums.txt >> dist/traefik_${VERSION}_checksums.txt
rm dist/**/*_checksums.txt
tar cfz dist/traefik-${VERSION}.src.tar.gz \
--exclude-vcs \
--exclude .idea \
--exclude .travis \
--exclude .semaphoreci \
--exclude .github \
--exclude dist .
$(if $(IN_DOCKER),$(DOCKER_RUN_TRAEFIK_NOTTY)) chown -R $(shell id -u):$(shell id -g) dist/
chown -R $(shell id -u):$(shell id -g) dist/
## Format the Code
.PHONY: fmt

View file

@ -87,11 +87,11 @@ func run(dest string) error {
}
func cleanType(typ types.Type, base string) string {
if typ.String() == "github.com/traefik/traefik/v3/pkg/tls.FileOrContent" {
if typ.String() == "github.com/traefik/traefik/v3/pkg/types.FileOrContent" {
return "string"
}
if typ.String() == "[]github.com/traefik/traefik/v3/pkg/tls.FileOrContent" {
if typ.String() == "[]github.com/traefik/traefik/v3/pkg/types.FileOrContent" {
return "[]string"
}

View file

@ -5,6 +5,7 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"io"
stdlog "log"
"net/http"
"os"
@ -43,9 +44,9 @@ import (
"github.com/traefik/traefik/v3/pkg/tcp"
traefiktls "github.com/traefik/traefik/v3/pkg/tls"
"github.com/traefik/traefik/v3/pkg/tracing"
"github.com/traefik/traefik/v3/pkg/tracing/jaeger"
"github.com/traefik/traefik/v3/pkg/types"
"github.com/traefik/traefik/v3/pkg/version"
"go.opentelemetry.io/otel/trace"
)
func main() {
@ -265,10 +266,9 @@ func setupServer(staticConfiguration *static.Configuration) (*server.Server, err
managerFactory := service.NewManagerFactory(*staticConfiguration, routinesPool, metricsRegistry, roundTripperManager, acmeHTTPHandler)
// Router factory
accessLog := setupAccessLog(staticConfiguration.AccessLog)
tracer := setupTracing(staticConfiguration.Tracing)
tracer, tracerCloser := setupTracing(staticConfiguration.Tracing)
chainBuilder := middleware.NewChainBuilder(metricsRegistry, accessLog, tracer)
routerFactory := server.NewRouterFactory(*staticConfiguration, managerFactory, tlsManager, chainBuilder, pluginBuilder, metricsRegistry, dialerManager)
@ -351,7 +351,7 @@ func setupServer(staticConfiguration *static.Configuration) (*server.Server, err
}
})
return server.NewServer(routinesPool, serverEntryPointsTCP, serverEntryPointsUDP, watcher, chainBuilder, accessLog), nil
return server.NewServer(routinesPool, serverEntryPointsTCP, serverEntryPointsUDP, watcher, chainBuilder, accessLog, tracerCloser), nil
}
func getHTTPChallengeHandler(acmeProviders []*acme.Provider, httpChallengeProvider http.Handler) http.Handler {
@ -564,78 +564,18 @@ func setupAccessLog(conf *types.AccessLog) *accesslog.Handler {
return accessLoggerMiddleware
}
func setupTracing(conf *static.Tracing) *tracing.Tracing {
func setupTracing(conf *static.Tracing) (trace.Tracer, io.Closer) {
if conf == nil {
return nil
return nil, nil
}
var backend tracing.Backend
if conf.Jaeger != nil {
backend = conf.Jaeger
}
if conf.Zipkin != nil {
if backend != nil {
log.Error().Msg("Multiple tracing backend are not supported: cannot create Zipkin backend.")
} else {
backend = conf.Zipkin
}
}
if conf.Datadog != nil {
if backend != nil {
log.Error().Msg("Multiple tracing backend are not supported: cannot create Datadog backend.")
} else {
backend = conf.Datadog
}
}
if conf.Instana != nil {
if backend != nil {
log.Error().Msg("Multiple tracing backend are not supported: cannot create Instana backend.")
} else {
backend = conf.Instana
}
}
if conf.Haystack != nil {
if backend != nil {
log.Error().Msg("Multiple tracing backend are not supported: cannot create Haystack backend.")
} else {
backend = conf.Haystack
}
}
if conf.Elastic != nil {
if backend != nil {
log.Error().Msg("Multiple tracing backend are not supported: cannot create Elastic backend.")
} else {
backend = conf.Elastic
}
}
if conf.OpenTelemetry != nil {
if backend != nil {
log.Error().Msg("Tracing backends are all mutually exclusive: cannot create OpenTelemetry backend.")
} else {
backend = conf.OpenTelemetry
}
}
if backend == nil {
log.Debug().Msg("Could not initialize tracing, using Jaeger by default")
defaultBackend := &jaeger.Config{}
defaultBackend.SetDefaults()
backend = defaultBackend
}
tracer, err := tracing.NewTracing(conf.ServiceName, conf.SpanNameLimit, backend)
tracer, closer, err := tracing.NewTracing(conf)
if err != nil {
log.Warn().Err(err).Msg("Unable to create tracer")
return nil
return nil, nil
}
return tracer
return tracer, closer
}
func checkNewVersion() {

View file

@ -13,67 +13,13 @@ Let's see how.
## Building
You need either [Docker](https://github.com/docker/docker "Link to website of Docker") and `make` (Method 1), or [Go](https://go.dev/ "Link to website of Go") (Method 2) in order to build Traefik.
For changes to its dependencies, the `dep` dependency management tool is required.
### Method 1: Using `Docker` and `Makefile`
Run make with the `binary` target.
```bash
make binary
```
This will create binaries for the Linux platform in the `dist` folder.
When you run the build on CI, you probably want to run Docker in non-interactive mode. To achieve that, define the `DOCKER_NON_INTERACTIVE=true` environment variable.
```bash
$ make binary
docker build -t traefik-webui -f webui/Dockerfile webui
Sending build context to Docker daemon 2.686MB
Step 1/11 : FROM node:8.15.0
---> 1f6c34f7921c
[...]
Successfully built ce4ff439c06a
Successfully tagged traefik-webui:latest
[...]
docker build -t "traefik-dev:4475--feature-documentation" -f build.Dockerfile .
Sending build context to Docker daemon 279MB
Step 1/10 : FROM golang:1.16-alpine
---> f4bfb3d22bda
[...]
Successfully built 5c3c1a911277
Successfully tagged traefik-dev:4475--feature-documentation
docker run -e "TEST_CONTAINER=1" -v "/var/run/docker.sock:/var/run/docker.sock" -it -e OS_ARCH_ARG -e OS_PLATFORM_ARG -e TESTFLAGS -e VERBOSE -e VERSION -e CODENAME -e TESTDIRS -e CI -e CONTAINER=DOCKER -v "/home/ldez/sources/go/src/github.com/traefik/traefik/"dist":/go/src/github.com/traefik/traefik/"dist"" "traefik-dev:4475--feature-documentation" ./script/make.sh generate binary
---> Making bundle: generate (in .)
removed 'autogen/genstatic/gen.go'
---> Making bundle: binary (in .)
$ ls dist/
traefik*
```
The following targets can be executed outside Docker by setting the variable `IN_DOCKER` to an empty string (although be aware that some of the tests might fail in that context):
- `test-unit`
- `test-integration`
- `validate`
- `binary` (the webUI is still generated by using Docker)
ex:
```bash
IN_DOCKER= make test-unit
```
### Method 2: Using `go`
Requirements:
- `go` v1.16+
- environment variable `GO111MODULE=on`
You need:
- [Docker](https://github.com/docker/docker "Link to website of Docker")
- `make`
- [Go](https://go.dev/ "Link to website of Go")
- [misspell](https://github.com/golangci/misspell)
- [shellcheck](https://github.com/koalaman/shellcheck)
- [Tailscale](https://tailscale.com/) if you are using Docker Desktop
!!! tip "Source Directory"
@ -106,41 +52,33 @@ Requirements:
## ... and the list goes on
```
#### Build Traefik
### Build Traefik
Once you've set up your go environment and cloned the source repository, you can build Traefik.
```bash
# Generate UI static files
make clean-webui generate-webui
$ make binary
./script/make.sh generate binary
---> Making bundle: generate (in .)
# required to merge non-code components into the final binary,
# such as the web dashboard/UI
go generate
---> Making bundle: binary (in .)
$ ls dist/
traefik*
```
```bash
# Standard go build
go build ./cmd/traefik
```
You will find the Traefik executable (`traefik`) in the `~/go/src/github.com/traefik/traefik` directory.
You will find the Traefik executable (`traefik`) in the `./dist` directory.
## Testing
### Method 1: `Docker` and `make`
Run unit tests using the `test-unit` target.
Run integration tests using the `test-integration` target.
Run all tests (unit and integration) using the `test` target.
```bash
$ make test-unit
docker build -t "traefik-dev:your-feature-branch" -f build.Dockerfile .
# […]
docker run --rm -it -e OS_ARCH_ARG -e OS_PLATFORM_ARG -e TESTFLAGS -v "/home/user/go/src/github/traefik/traefik/dist:/go/src/github.com/traefik/traefik/dist" "traefik-dev:your-feature-branch" ./script/make.sh generate test-unit
./script/make.sh generate test-unit
---> Making bundle: generate (in .)
removed 'gen.go'
---> Making bundle: test-unit (in .)
+ go test -cover -coverprofile=cover.out .
@ -151,28 +89,30 @@ Test success
For development purposes, you can specify which tests to run by using the `TESTFLAGS` variable (this only works with the `test-integration` target):
??? note "Configuring Tailscale for Docker Desktop user"
Create a `tailscale.secret` file in the `integration` directory.
This file needs to contain a [Tailscale auth key](https://tailscale.com/kb/1085/auth-keys)
(an ephemeral, but reusable, one is recommended).
Add this section to your Tailscale ACLs to auto-approve the routes for the
containers in the Docker subnet:
```json
"autoApprovers": {
// Allow myself to automatically
// advertise routes for Docker networks
"routes": {
"172.31.42.0/24": ["your_tailscale_identity"],
},
},
```
```bash
# Run all tests in the MyTest suite
TESTFLAGS="-check.f MyTestSuite" make test-integration
TESTFLAGS="-test.run TestAccessLogSuite" make test-integration
# Run the test "MyTest" in the MyTest suite
TESTFLAGS="-check.f MyTestSuite.MyTest" make test-integration
# Run all tests starting with "My" in the MyTest suite
TESTFLAGS="-check.f MyTestSuite.My" make test-integration
# Run all tests ending with "Test" in the MyTest suite
TESTFLAGS="-check.f MyTestSuite.*Test" make test-integration
TESTFLAGS="-test.run TestAccessLogSuite -testify.m ^TestAccessLog$" make test-integration
```
Check [gocheck](https://labix.org/gocheck "Link to website of gocheck") for more information.
### Method 2: `go`
Unit tests can be run from the cloned directory using `$ go test ./...` which should return `ok`, similar to:
```test
ok _/home/user/go/src/github/traefik/traefik 0.004s
```
Integration tests must be run from the `integration/` directory and require the `-integration` switch: `$ cd integration && go test -integration ./...`.

View file

@ -4,20 +4,23 @@ This page is maintained and updated periodically to reflect our roadmap and any
| Feature | Deprecated | End of Support | Removal |
|----------------------------------------------------------------------------------------------------------------------|------------|----------------|---------|
| [Kubernetes CRDs API Version `traefik.io/v1alpha1`](#kubernetes-crds-api-version-traefikiov1alpha1) | N/A | N/A | 3.0 |
| [Kubernetes CRD Provider API Version `traefik.io/v1alpha1`](#kubernetes-crd-provider-api-version-traefikiov1alpha1) | 3.0 | N/A | 4.0 |
| [Kubernetes Ingress API Version `networking.k8s.io/v1beta1`](#kubernetes-ingress-api-version-networkingk8siov1beta1) | N/A | N/A | 3.0 |
| [CRD API Version `apiextensions.k8s.io/v1beta1`](#kubernetes-ingress-api-version-networkingk8siov1beta1) | N/A | N/A | 3.0 |
## Impact
### Kubernetes CRDs API Version `traefik.io/v1alpha1`
### Kubernetes CRD Provider API Version `traefik.io/v1alpha1`
The newly introduced Kubernetes CRD API Version `traefik.io/v1alpha1` will subsequently be removed in Traefik v3. The following version will be `traefik.io/v1`.
The Kubernetes CRD provider API Version `traefik.io/v1alpha1` is deprecated in Traefik v3.
Please use the API Group `traefik.io/v1` instead.
### Kubernetes Ingress API Version `networking.k8s.io/v1beta1`
The Kubernetes Ingress API Version `networking.k8s.io/v1beta1` is removed in v3. Please use the API Group `networking.k8s.io/v1` instead.
The Kubernetes Ingress API Version `networking.k8s.io/v1beta1` support is removed in v3.
Please use the API Group `networking.k8s.io/v1` instead.
### Traefik CRD API Version `apiextensions.k8s.io/v1beta1`
### Traefik CRD Definitions API Version `apiextensions.k8s.io/v1beta1`
The Traefik CRD API Version `apiextensions.k8s.io/v1beta1` is removed in v3. Please use the API Group `apiextensions.k8s.io/v1` instead.
The Traefik CRD definitions API Version `apiextensions.k8s.io/v1beta1` support is removed in v3.
Please use the API Group `apiextensions.k8s.io/v1` instead.

View file

@ -82,11 +82,11 @@ docker run traefik[:version] --help
# ex: docker run traefik:v3.0 --help
```
All available arguments can also be found [here](../reference/static-configuration/cli.md).
Check the [CLI reference](../reference/static-configuration/cli.md "Link to CLI reference overview") for an overview of all available arguments.
### Environment Variables
All available environment variables can be found [here](../reference/static-configuration/env.md)
All available environment variables can be found in the [static configuration environment overview](../reference/static-configuration/env.md).
## Available Configuration Options

View file

@ -29,7 +29,7 @@ Not to mention that dynamic configuration changes potentially make that kind of
Therefore, in this dynamic context,
the static configuration of an `entryPoint` does not give any hint whatsoever about how the traffic going through that `entryPoint` is going to be routed.
Or whether it's even going to be routed at all,
i.e. whether there is a Router matching the kind of traffic going through it.
that is whether there is a Router matching the kind of traffic going through it.
### `404 Not found`
@ -71,7 +71,7 @@ Traefik returns a `502` response code when an error happens while contacting the
### `503 Service Unavailable`
Traefik returns a `503` response code when a Router has been matched
Traefik returns a `503` response code when a Router has been matched,
but there are no servers ready to handle the request.
This situation is encountered when a service has been explicitly configured without servers,
@ -84,7 +84,7 @@ Sometimes, the `404` response code doesn't play well with other parties or servi
In these situations, you may want Traefik to always reply with a `503` response code,
instead of a `404` response code.
To achieve this behavior, a simple catchall router,
To achieve this behavior, a catchall router,
with the lowest possible priority and routing to a service without servers,
can handle all the requests when no other router has been matched.
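For illustration, a minimal file-provider sketch of such a catchall setup might look like the following (the router and service names, and the `web` entry point, are placeholders, not part of this page):

```yaml
http:
  routers:
    catchall:
      # Matches everything, but only after every other router has been tried.
      rule: "PathPrefix(`/`)"
      priority: 1
      entryPoints:
        - web
      service: unavailable
  services:
    unavailable:
      # A service declared without servers always answers 503.
      loadBalancer:
        servers: []
```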
@ -216,7 +216,7 @@ error: field not found, node: -badField-
The "field not found" error occurs, when an unknown property is encountered in the dynamic or static configuration.
One easy way to check whether a configuration file is well-formed, is to validate it with:
One way to check whether a configuration file is well-formed, is to validate it with:
- [JSON Schema of the static configuration](https://json.schemastore.org/traefik-v2.json)
- [JSON Schema of the dynamic configuration](https://json.schemastore.org/traefik-v2-file-provider.json)
@ -226,11 +226,11 @@ One easy way to check whether a configuration file is well-formed, is to validat
As a common tip, if a resource is dropped/not created by Traefik after the dynamic configuration was evaluated,
one should look for an error in the logs.
If found, the error obviously confirms that something went wrong while creating the resource,
If found, the error confirms that something went wrong while creating the resource,
and the message should help in figuring out the mistake(s) in the configuration, and how to fix it.
When using the file provider,
one easy way to check if the dynamic configuration is well-formed is to validate it with the [JSON Schema of the dynamic configuration](https://json.schemastore.org/traefik-v2-file-provider.json).
one way to check if the dynamic configuration is well-formed is to validate it with the [JSON Schema of the dynamic configuration](https://json.schemastore.org/traefik-v2-file-provider.json).
## Why does Let's Encrypt wildcard certificate renewal/generation with DNS challenge fail?
@ -248,6 +248,6 @@ then it could be due to `CNAME` support.
In which case, you should make sure your infrastructure is properly set up for a
`DNS` challenge that does not rely on `CNAME`, and you should try disabling `CNAME` support with:
```bash
```shell
LEGO_DISABLE_CNAME_SUPPORT=true
```

View file

@ -19,7 +19,7 @@ Choose one of the [official Docker images](https://hub.docker.com/_/traefik) and
* [YAML](https://raw.githubusercontent.com/traefik/traefik/v3.0/traefik.sample.yml)
* [TOML](https://raw.githubusercontent.com/traefik/traefik/v3.0/traefik.sample.toml)
```bash
```shell
docker run -d -p 8080:8080 -p 80:80 \
-v $PWD/traefik.yml:/etc/traefik/traefik.yml traefik:v3.0
```
@ -59,7 +59,7 @@ You can update the chart repository by running:
helm repo update
```
And install it with the `helm` command line:
And install it with the Helm command line:
```bash
helm install traefik traefik/traefik
@ -106,7 +106,7 @@ helm install traefik traefik/traefik
### Exposing the Traefik dashboard
This HelmChart does not expose the Traefik dashboard by default, for security concerns.
This Helm chart does not expose the Traefik dashboard by default, for security concerns.
Thus, there are multiple ways to expose the dashboard.
For instance, the dashboard access could be achieved through a port-forward:

View file

@ -1,11 +1,11 @@
---
title: "Traefik Getting Started With Kubernetes"
description: "Looking to get started with Traefik Proxy? Read the technical documentation to learn a simple use case that leverages Kubernetes."
description: "Get started with Traefik Proxy and Kubernetes."
---
# Quick Start
A Simple Use Case of Traefik Proxy and Kubernetes
A Use Case of Traefik Proxy and Kubernetes
{: .subtitle }
This guide is an introduction to using Traefik Proxy in a Kubernetes environment.
@ -16,7 +16,7 @@ It presents and explains the basic blocks required to start with Traefik such as
Traefik uses the Kubernetes API to discover running services.
In order to use the Kubernetes API, Traefik needs some permissions.
To use the Kubernetes API, Traefik needs some permissions.
This [permission mechanism](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) is based on roles defined by the cluster administrator.
The role is then bound to an account used by an application, in this case, Traefik Proxy.
@ -88,7 +88,7 @@ roleRef:
subjects:
- kind: ServiceAccount
name: traefik-account
namespace: default # Using "default" because we did not specify a namespace when creating the ClusterAccount.
namespace: default # This tutorial uses the "default" K8s namespace.
```
!!! info "`roleRef` is the Kubernetes reference to the role created in `00-role.yml`."
@ -146,7 +146,7 @@ These arguments are the static configuration for Traefik.
From here, it is possible to enable the dashboard,
configure entry points,
select dynamic configuration providers,
and [more](../reference/static-configuration/cli.md)...
and [more](../reference/static-configuration/cli.md).
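As a rough, hedged sketch of what such CLI arguments look like inside the Deployment's container spec (the exact flags used by the guide's manifest may differ; these values are illustrative):

```yaml
args:
  # Static configuration passed as CLI flags (illustrative values):
  - --api.insecure                 # enable the dashboard/API without auth (demo only)
  - --providers.kubernetesingress  # discover Ingress resources as dynamic configuration
  - --entrypoints.web.address=:80  # define an entry point named "web" on port 80
```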
In this deployment,
the static configuration enables the Traefik dashboard,
@ -214,7 +214,7 @@ The only part still missing is the business application behind the reverse proxy
For this guide, we use the example application [traefik/whoami](https://github.com/traefik/whoami),
but the principles are applicable to any other application.
The `whoami` application is a simple HTTP server running on port 80 which answers host-related information to the incoming requests.
The `whoami` application is an HTTP server running on port 80 which answers host-related information to the incoming requests.
As usual, start by creating a file called `03-whoami.yml` and paste the following `Deployment` resource:
```yaml tab="03-whoami.yml"

View file

@ -1,11 +1,11 @@
---
title: "Traefik Getting Started Quickly"
description: "Looking to get started with Traefik Proxy quickly? Read the technical documentation to see a basic use case that leverages Docker."
description: "Get started with Traefik Proxy and Docker."
---
# Quick Start
A Basic Use Case Using Docker
A Use Case Using Docker
{: .subtitle }
![quickstart-diagram](../assets/img/quickstart-diagram.png)
@ -19,9 +19,9 @@ version: '3'
services:
reverse-proxy:
# The official v3 Traefik Docker image
# The official v2 Traefik docker image
image: traefik:v3.0
# Enables the web UI and tells Traefik to listen to Docker
# Enables the web UI and tells Traefik to listen to docker
command: --api.insecure=true --providers.docker
ports:
# The HTTP port
@ -41,11 +41,11 @@ Start your `reverse-proxy` with the following command:
docker-compose up -d reverse-proxy
```
You can open a browser and go to `http://localhost:8080/api/rawdata` to see Traefik's API rawdata (we'll go back there once we have launched a service in step 2).
You can open a browser and go to `http://localhost:8080/api/rawdata` to see Traefik's API rawdata (you'll go back there once you have launched a service in step 2).
## Traefik Detects New Services and Creates the Route for You
Now that we have a Traefik instance up and running, we will deploy new services.
Now that you have a Traefik instance up and running, you will deploy new services.
Edit your `docker-compose.yml` file and add the following at the end of your file.
@ -63,7 +63,7 @@ services:
- "traefik.http.routers.whoami.rule=Host(`whoami.docker.localhost`)"
```
The above defines [`whoami`](https://github.com/traefik/whoami "Link to whoami app on GitHub"), a web service that outputs information about the machine it is deployed on (its IP address, host, etc.).
The above defines `whoami`: a web service that outputs information about the machine it is deployed on (its IP address, host, and others).
Start the `whoami` service with the following command:
@ -73,7 +73,7 @@ docker-compose up -d whoami
Browse `http://localhost:8080/api/rawdata` and see that Traefik has automatically detected the new container and updated its own configuration.
When Traefik detects new services, it creates the corresponding routes, so you can call them ... _let's see!_ (Here, we're using curl)
When Traefik detects new services, it creates the corresponding routes, so you can call them ... _let's see!_ (Here, you're using curl)
```shell
curl -H Host:whoami.docker.localhost http://127.0.0.1
@ -103,7 +103,7 @@ Finally, see that Traefik load-balances between the two instances of your servic
curl -H Host:whoami.docker.localhost http://127.0.0.1
```
The output will show alternatively one of the followings:
The output will show alternatively one of the following:
```yaml
Hostname: a656c8ddca6c

View file

@ -18,7 +18,7 @@ Traefik is natively compliant with every major cluster technology, such as Kuber
With Traefik, there is no need to maintain and synchronize a separate configuration file: everything happens automatically, in real time (no restarts, no connection interruptions).
With Traefik, you spend time developing and deploying new features to your system, not on configuring and maintaining its working state.
Developing Traefik, our main goal is to make it simple to use, and we're sure you'll enjoy it.
Developing Traefik, our main goal is to make it effortless to use, and we're sure you'll enjoy it.
-- The Traefik Maintainer Team

View file

@ -0,0 +1,201 @@
---
title: "Traefik HTTP Middlewares IPWhiteList"
description: "Learn how to use IPWhiteList in HTTP middleware for limiting clients to specific IPs in Traefik Proxy. Read the technical documentation."
---
# IPWhiteList
Limiting Clients to Specific IPs
{: .subtitle }
![IPWhiteList](../../assets/img/middleware/ipwhitelist.png)
IPWhiteList accepts / refuses requests based on the client IP.
!!! warning
This middleware is deprecated, please use the [IPAllowList](./ipallowlist.md) middleware instead.
## Configuration Examples
```yaml tab="Docker"
# Accepts request from defined IP
labels:
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
```
```yaml tab="Kubernetes"
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: test-ipwhitelist
spec:
ipWhiteList:
sourceRange:
- 127.0.0.1/32
- 192.168.1.7
```
```yaml tab="Consul Catalog"
# Accepts request from defined IP
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
```
```yaml tab="File (YAML)"
# Accepts request from defined IP
http:
middlewares:
test-ipwhitelist:
ipWhiteList:
sourceRange:
- "127.0.0.1/32"
- "192.168.1.7"
```
```toml tab="File (TOML)"
# Accepts request from defined IP
[http.middlewares]
[http.middlewares.test-ipwhitelist.ipWhiteList]
sourceRange = ["127.0.0.1/32", "192.168.1.7"]
```
## Configuration Options
### `sourceRange`
The `sourceRange` option sets the allowed IPs (or ranges of allowed IPs by using CIDR notation).
### `ipStrategy`
The `ipStrategy` option defines two parameters that set how Traefik determines the client IP: `depth`, and `excludedIPs`.
If no strategy is set, the default behavior is to match `sourceRange` against the Remote address found in the request.
!!! important "As a middleware, whitelisting happens before the actual proxying to the backend takes place. In addition, the previous network hop only gets appended to `X-Forwarded-For` during the last stages of proxying, i.e. after it has already passed through whitelisting. Therefore, during whitelisting, as the previous network hop is not yet present in `X-Forwarded-For`, it cannot be matched against `sourceRange`."
#### `ipStrategy.depth`
The `depth` option tells Traefik to use the `X-Forwarded-For` header and take the IP located at the `depth` position (starting from the right).
- If `depth` is greater than the total number of IPs in `X-Forwarded-For`, then the client IP will be empty.
- `depth` is ignored if its value is less than or equal to 0.
!!! example "Examples of Depth & X-Forwarded-For"
If `depth` is set to 2, and the request `X-Forwarded-For` header is `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` then the "real" client IP is `"10.0.0.1"` (at depth 4) but the IP used for the whitelisting is `"12.0.0.1"` (`depth=2`).
| `X-Forwarded-For` | `depth` | clientIP |
|-----------------------------------------|---------|--------------|
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `1` | `"13.0.0.1"` |
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `3` | `"11.0.0.1"` |
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `5` | `""` |
```yaml tab="Docker"
# Whitelisting Based on `X-Forwarded-For` with `depth=2`
labels:
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.ipstrategy.depth=2"
```
```yaml tab="Kubernetes"
# Whitelisting Based on `X-Forwarded-For` with `depth=2`
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: test-ipwhitelist
spec:
ipWhiteList:
sourceRange:
- 127.0.0.1/32
- 192.168.1.7
ipStrategy:
depth: 2
```
```yaml tab="Consul Catalog"
# Whitelisting Based on `X-Forwarded-For` with `depth=2`
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.ipstrategy.depth=2"
```
```yaml tab="File (YAML)"
# Whitelisting Based on `X-Forwarded-For` with `depth=2`
http:
middlewares:
test-ipwhitelist:
ipWhiteList:
sourceRange:
- "127.0.0.1/32"
- "192.168.1.7"
ipStrategy:
depth: 2
```
```toml tab="File (TOML)"
# Whitelisting Based on `X-Forwarded-For` with `depth=2`
[http.middlewares]
[http.middlewares.test-ipwhitelist.ipWhiteList]
sourceRange = ["127.0.0.1/32", "192.168.1.7"]
[http.middlewares.test-ipwhitelist.ipWhiteList.ipStrategy]
depth = 2
```
#### `ipStrategy.excludedIPs`
`excludedIPs` configures Traefik to scan the `X-Forwarded-For` header and select the first IP not in the list.
!!! important "If `depth` is specified, `excludedIPs` is ignored."
!!! example "Example of ExcludedIPs & X-Forwarded-For"
| `X-Forwarded-For` | `excludedIPs` | clientIP |
|-----------------------------------------|-----------------------|--------------|
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `"12.0.0.1,13.0.0.1"` | `"11.0.0.1"` |
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `"15.0.0.1,13.0.0.1"` | `"12.0.0.1"` |
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `"10.0.0.1,13.0.0.1"` | `"12.0.0.1"` |
| `"10.0.0.1,11.0.0.1,12.0.0.1,13.0.0.1"` | `"15.0.0.1,16.0.0.1"` | `"13.0.0.1"` |
| `"10.0.0.1,11.0.0.1"` | `"10.0.0.1,11.0.0.1"` | `""` |
```yaml tab="Docker"
# Exclude from `X-Forwarded-For`
labels:
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.ipstrategy.excludedips=127.0.0.1/32, 192.168.1.7"
```
```yaml tab="Kubernetes"
# Exclude from `X-Forwarded-For`
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: test-ipwhitelist
spec:
ipWhiteList:
ipStrategy:
excludedIPs:
- 127.0.0.1/32
- 192.168.1.7
```
```yaml tab="Consul Catalog"
# Exclude from `X-Forwarded-For`
- "traefik.http.middlewares.test-ipwhitelist.ipwhitelist.ipstrategy.excludedips=127.0.0.1/32, 192.168.1.7"
```
```yaml tab="File (YAML)"
# Exclude from `X-Forwarded-For`
http:
middlewares:
test-ipwhitelist:
ipWhiteList:
ipStrategy:
excludedIPs:
- "127.0.0.1/32"
- "192.168.1.7"
```
```toml tab="File (TOML)"
# Exclude from `X-Forwarded-For`
[http.middlewares]
[http.middlewares.test-ipwhitelist.ipWhiteList]
[http.middlewares.test-ipwhitelist.ipWhiteList.ipStrategy]
excludedIPs = ["127.0.0.1/32", "192.168.1.7"]
```

View file

@ -0,0 +1,64 @@
---
title: "Traefik TCP Middlewares IPWhiteList"
description: "Learn how to use IPWhiteList in TCP middleware for limiting clients to specific IPs in Traefik Proxy. Read the technical documentation."
---
# IPWhiteList
Limiting Clients to Specific IPs
{: .subtitle }
IPWhiteList accepts / refuses connections based on the client IP.
!!! warning
This middleware is deprecated, please use the [IPAllowList](./ipallowlist.md) middleware instead.
## Configuration Examples
```yaml tab="Docker"
# Accepts connections from defined IP
labels:
- "traefik.tcp.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
```
```yaml tab="Kubernetes"
apiVersion: traefik.io/v1alpha1
kind: MiddlewareTCP
metadata:
name: test-ipwhitelist
spec:
ipWhiteList:
sourceRange:
- 127.0.0.1/32
- 192.168.1.7
```
```yaml tab="Consul Catalog"
# Accepts request from defined IP
- "traefik.tcp.middlewares.test-ipwhitelist.ipwhitelist.sourcerange=127.0.0.1/32, 192.168.1.7"
```
```toml tab="File (TOML)"
# Accepts request from defined IP
[tcp.middlewares]
[tcp.middlewares.test-ipwhitelist.ipWhiteList]
sourceRange = ["127.0.0.1/32", "192.168.1.7"]
```
```yaml tab="File (YAML)"
# Accepts request from defined IP
tcp:
middlewares:
test-ipwhitelist:
ipWhiteList:
sourceRange:
- "127.0.0.1/32"
- "192.168.1.7"
```
## Configuration Options
### `sourceRange`
The `sourceRange` option sets the allowed IPs (or ranges of allowed IPs by using CIDR notation).

View file

@ -10,28 +10,335 @@ How to Migrate from Traefik v2 to Traefik v3.
The version 3 of Traefik introduces a number of breaking changes,
which require one to update their configuration when they migrate from v2 to v3.
The goal of this page is to recapitulate all of these changes, and in particular to give examples,
feature by feature, of how the configuration looked like in v2, and how it now looks like in v3.
The goal of this page is to recapitulate all of these changes,
and in particular to give examples, feature by feature,
of how the configuration looked in v2,
and how it now looks in v3.
## IPWhiteList
## Static configuration
### Docker & Docker Swarm
In v3, the Docker provider has been split into 2 providers:
- Docker provider (without Swarm support)
- Swarm provider (Swarm support only)
??? example "An example usage of v2 Docker provider with Swarm"
```yaml tab="File (YAML)"
providers:
docker:
swarmMode: true
```
```toml tab="File (TOML)"
[providers.docker]
swarmMode=true
```
```bash tab="CLI"
--providers.docker.swarmMode=true
```
This configuration is now unsupported and would prevent Traefik from starting.
#### Remediation
In v3, the `swarmMode` should not be used with the Docker provider, and, to use Swarm, the Swarm provider should be used instead.
??? example "An example usage of the Swarm provider"
```yaml tab="File (YAML)"
providers:
swarm:
endpoint: "tcp://127.0.0.1:2377"
```
```toml tab="File (TOML)"
[providers.swarm]
endpoint="tcp://127.0.0.1:2377"
```
```bash tab="CLI"
--providers.swarm.endpoint=tcp://127.0.0.1:2377
```
### HTTP3 Experimental Configuration
In v3, HTTP/3 is no longer an experimental feature.
It can be enabled on entry points without the associated `experimental.http3` option, which is now removed.
It is now unsupported and would prevent Traefik from starting.
??? example "An example usage of v2 Experimental `http3` option"
```yaml tab="File (YAML)"
experimental:
http3: true
```
```toml tab="File (TOML)"
[experimental]
http3=true
```
```bash tab="CLI"
--experimental.http3=true
```
#### Remediation
The `http3` option should be removed from the static configuration experimental section.
### Consul provider
The Consul provider `namespace` option was deprecated in v2 and is now removed in v3.
It is now unsupported and would prevent Traefik from starting.
??? example "An example usage of v2 Consul `namespace` option"
```yaml tab="File (YAML)"
consul:
namespace: foobar
```
```toml tab="File (TOML)"
[consul]
namespace=foobar
```
```bash tab="CLI"
--consul.namespace=foobar
```
#### Remediation
In v3, the `namespaces` option should be used instead of the `namespace` option.
??? example "An example usage of Consul `namespaces` option"
```yaml tab="File (YAML)"
consul:
namespaces:
- foobar
```
```toml tab="File (TOML)"
[consul]
namespaces=["foobar"]
```
```bash tab="CLI"
--consul.namespaces=foobar
```
### ConsulCatalog provider
The ConsulCatalog provider `namespace` option was deprecated in v2 and is now removed in v3.
It is now unsupported and would prevent Traefik from starting.
??? example "An example usage of v2 ConsulCatalog `namespace` option"
```yaml tab="File (YAML)"
consulCatalog:
namespace: foobar
```
```toml tab="File (TOML)"
[consulCatalog]
namespace=foobar
```
```bash tab="CLI"
--consulCatalog.namespace=foobar
```
#### Remediation
In v3, the `namespaces` option should be used instead of the `namespace` option.
??? example "An example usage of ConsulCatalog `namespaces` option"
```yaml tab="File (YAML)"
consulCatalog:
namespaces:
- foobar
```
```toml tab="File (TOML)"
[consulCatalog]
namespaces=["foobar"]
```
```bash tab="CLI"
--consulCatalog.namespaces=foobar
```
### Nomad provider
The Nomad provider `namespace` option was deprecated in v2 and is now removed in v3.
It is now unsupported and would prevent Traefik from starting.
??? example "An example usage of v2 Nomad `namespace` option"
```yaml tab="File (YAML)"
nomad:
namespace: foobar
```
```toml tab="File (TOML)"
[nomad]
namespace=foobar
```
```bash tab="CLI"
--nomad.namespace=foobar
```
#### Remediation
In v3, the `namespaces` option should be used instead of the `namespace` option.
??? example "An example usage of Nomad `namespaces` option"
```yaml tab="File (YAML)"
nomad:
namespaces:
- foobar
```
```toml tab="File (TOML)"
[nomad]
namespaces=["foobar"]
```
```bash tab="CLI"
--nomad.namespaces=foobar
```
### Rancher v1 Provider
In v3, the Rancher v1 provider has been removed because Rancher v1 is [no longer actively maintained](https://rancher.com/docs/os/v1.x/en/support/),
and Rancher v2 is supported as a standard Kubernetes provider.
??? example "An example of Traefik v2 Rancher v1 configuration"
```yaml tab="File (YAML)"
providers:
rancher: {}
```
```toml tab="File (TOML)"
[providers.rancher]
```
```bash tab="CLI"
--providers.rancher=true
```
This configuration is now unsupported and would prevent Traefik from starting.
#### Remediation
Rancher 2.x requires Kubernetes and does not have a metadata endpoint of its own for Traefik to query.
As such, Rancher 2.x users should utilize the [Kubernetes CRD provider](../providers/kubernetes-crd.md) directly.
Also, all Rancher provider related configuration should be removed from the static configuration.
### Marathon provider
Marathon maintenance [ended on October 31, 2021](https://github.com/mesosphere/marathon/blob/master/README.md).
In v3, the Marathon provider has been removed.
??? example "An example of v2 Marathon provider configuration"
```yaml tab="File (YAML)"
providers:
marathon: {}
```
```toml tab="File (TOML)"
[providers.marathon]
```
```bash tab="CLI"
--providers.marathon=true
```
This configuration is now unsupported and would prevent Traefik from starting.
#### Remediation
All Marathon provider related configuration should be removed from the static configuration.
### InfluxDB v1
InfluxDB v1.x maintenance [ended in 2021](https://www.influxdata.com/blog/influxdb-oss-and-enterprise-roadmap-update-from-influxdays-emea/).
In v3, the InfluxDB v1 metrics provider has been removed.
??? example "An example of Traefik v2 InfluxDB v1 metrics configuration"
```yaml tab="File (YAML)"
metrics:
influxDB: {}
```
```toml tab="File (TOML)"
[metrics.influxDB]
```
```bash tab="CLI"
--metrics.influxDB=true
```
This configuration is now unsupported and would prevent Traefik from starting.
#### Remediation
All InfluxDB v1 metrics provider related configuration should be removed from the static configuration.
### Pilot
Traefik Pilot has not been available since October 4th, 2022.
??? example "An example of v2 Pilot configuration"
```yaml tab="File (YAML)"
pilot:
token: foobar
```
```toml tab="File (TOML)"
[pilot]
token=foobar
```
```bash tab="CLI"
--pilot.token=foobar
```
In v2, Pilot configuration was deprecated and ineffective;
it is now unsupported and would prevent Traefik from starting.
#### Remediation
All Pilot related configuration should be removed from the static configuration.
## Dynamic configuration
### IPWhiteList
In v3, we renamed the `IPWhiteList` middleware to `IPAllowList` without changing anything in its configuration.
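For illustration, a file-provider sketch of the rename (the middleware name and source range are placeholders); only the key changes, the options keep the same shape:

```yaml
# v2 dynamic configuration (deprecated middleware name)
http:
  middlewares:
    clients:
      ipWhiteList:
        sourceRange:
          - "127.0.0.1/32"
---
# v3 equivalent: same options, renamed to ipAllowList
http:
  middlewares:
    clients:
      ipAllowList:
        sourceRange:
          - "127.0.0.1/32"
```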
## gRPC Metrics
### Deprecated Options Removal
In v3, the reported status code for gRPC requests is now the value of the `Grpc-Status` header.
## Deprecated Options Removal
- The `pilot` option has been removed from the static configuration.
- The `tracing.datadog.globaltag` option has been removed.
- The `namespace` option of Consul, Consul Catalog and Nomad providers has been removed.
- The `tls.caOptional` option has been removed from the ForwardAuth middleware, as well as from the HTTP, Consul, Etcd, Redis, ZooKeeper, Consul Catalog, and Docker providers.
- `sslRedirect`, `sslTemporaryRedirect`, `sslHost`, `sslForceHost` and `featurePolicy` options of the Headers middleware have been removed.
- The `forceSlash` option of the StripPrefix middleware has been removed.
- The `preferServerCipherSuites` option has been removed.
## Matchers
### Matchers
In v3, the `Headers` and `HeadersRegexp` matchers have been renamed to `Header` and `HeaderRegexp` respectively.
@ -48,61 +355,63 @@ and should be explicitly combined using logical operators to mimic previous beha
`HostHeader` has been removed, use `Host` instead.
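As a hedged illustration of the renamed matchers in a router rule (header name, value, and host are placeholders):

```yaml
# v2 rule using the deprecated matchers
rule: "Headers(`X-Custom-Header`, `foobar`) && HostHeader(`example.com`)"
---
# v3 equivalent
rule: "Header(`X-Custom-Header`, `foobar`) && Host(`example.com`)"
```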
## Content-Type Auto-Detection
In v3, the `Content-Type` header is not auto-detected anymore when it is not set by the backend.
One should use the `ContentType` middleware to enable the `Content-Type` header value auto-detection.
## HTTP/3
In v3, HTTP/3 is no longer an experimental feature.
The `experimental.http3` option has been removed from the static configuration.
## TCP ServersTransport
In v3, the support of `TCPServersTransport` has been introduced.
When using the KubernetesCRD provider, it is therefore necessary to update [RBAC](../reference/dynamic-configuration/kubernetes-crd.md#rbac) and [CRD](../reference/dynamic-configuration/kubernetes-crd.md) manifests.
### TCP LoadBalancer `terminationDelay` option
The TCP LoadBalancer `terminationDelay` option has been removed.
This option can now be configured directly at the `TCPServersTransport` level; please take a look at this [documentation](../routing/services/index.md#terminationdelay).
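A hedged sketch of what this could look like in the dynamic configuration, assuming the option keeps its name and duration format at the transport level (the transport name is a placeholder; refer to the linked documentation for the authoritative shape):

```yaml
tcp:
  serversTransports:
    mytransport:
      # Grace period before the connection is fully terminated.
      terminationDelay: 100ms
```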
## Rancher v1
In v3, the Rancher v1 provider has been removed because Rancher v1 is [no longer actively maintained](https://rancher.com/docs/os/v1.x/en/support/) and v2 is supported as a standard Kubernetes provider.
Rancher 2.x requires Kubernetes and does not have a metadata endpoint of its own for Traefik to query.
As such, Rancher 2.x users should utilize the [Kubernetes CRD provider](../providers/kubernetes-crd.md) directly.
## Marathon provider
In v3, the Marathon provider has been removed.
## InfluxDB v1
In v3, the InfluxDB v1 metrics provider has been removed because InfluxDB v1.x maintenance [ended in 2021](https://www.influxdata.com/blog/influxdb-oss-and-enterprise-roadmap-update-from-influxdays-emea/).
## Kubernetes CRDs API Group `traefik.containo.us`
### Kubernetes CRDs API Group `traefik.containo.us`
In v3, the Kubernetes CRDs API Group `traefik.containo.us` has been removed.
Please use the API Group `traefik.io` instead.
## Docker & Docker Swarm
In v3, the Docker provider has been split into 2 providers:
- Docker provider (without Swarm support)
- Swarm provider (Swarm support only)
## Kubernetes Ingress API Group `networking.k8s.io/v1beta1`
### Kubernetes Ingress API Group `networking.k8s.io/v1beta1`
In v3, the Kubernetes Ingress API Group `networking.k8s.io/v1beta1` ([removed since Kubernetes v1.22](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#ingress-v122)) support has been removed.
Please use the API Group `networking.k8s.io/v1` instead.
## Traefik CRD API Version `apiextensions.k8s.io/v1beta1`
### Traefik CRD API Version `apiextensions.k8s.io/v1beta1`
In v3, the Traefik CRD API Version `apiextensions.k8s.io/v1beta1` ([removed since Kubernetes v1.22](https://kubernetes.io/docs/reference/using-api/deprecation-guide/#customresourcedefinition-v122)) support has been removed.
Please use the CRD definition with the API Version `apiextensions.k8s.io/v1` instead.
## Operations
### Traefik RBAC Update
In v3, the support of `TCPServersTransport` has been introduced.
When using the KubernetesCRD provider, it is therefore necessary to update [RBAC](../reference/dynamic-configuration/kubernetes-crd.md#rbac) and [CRD](../reference/dynamic-configuration/kubernetes-crd.md) manifests.
### Content-Type Auto-Detection
In v3, the `Content-Type` header is not auto-detected anymore when it is not set by the backend.
One should use the `ContentType` middleware to enable the `Content-Type` header value auto-detection.
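A minimal file-provider sketch, assuming the middleware can be declared with an empty body (`autodetect` is a placeholder name); attach it to the routers that should keep auto-detection:

```yaml
http:
  middlewares:
    autodetect:
      # Re-enables Content-Type auto-detection for the routers using this middleware.
      contentType: {}
```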
### Observability
#### gRPC Metrics
In v3, the reported status code for gRPC requests is now the value of the `Grpc-Status` header.
#### Tracing
In v3, the tracing feature has been revamped and is now powered exclusively by [OpenTelemetry](https://opentelemetry.io/ "Link to website of OTel") (OTel).
!!! warning "Important"
Traefik v3 **no longer** supports direct output formats for specific vendors such as Instana, Jaeger, Zipkin, Haystack, Datadog, and Elastic.
Instead, it focuses on pure OpenTelemetry implementation, providing a unified and standardized approach for observability.
Here are two possible transition strategies:
1. OTLP Ingestion Endpoints:
Most vendors now offer OpenTelemetry Protocol (OTLP) ingestion endpoints.
You can seamlessly integrate Traefik v3 with these endpoints to continue leveraging tracing capabilities.
2. Legacy Stack Compatibility:
For legacy stacks that cannot immediately upgrade to the latest vendor agents supporting OTLP ingestion,
using OpenTelemetry (OTel) collectors with appropriate exporters configuration is a viable solution.
This allows continued compatibility with the existing infrastructure.
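Below is a minimal, hedged sketch of such a Collector configuration, assuming the OpenTelemetry Collector contrib distribution and a legacy Zipkin backend; the file name and endpoint are placeholders:
```yaml
# otel-collector.yaml (hypothetical file name)
receivers:
  otlp:
    protocols:
      http: {}
      grpc: {}

exporters:
  # Swap this exporter for the one matching your legacy tracing backend.
  zipkin:
    endpoint: "http://zipkin:9411/api/v2/spans"

service:
  pipelines:
    traces:
      receivers: [otlp]
      exporters: [zipkin]
```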
@ -526,3 +526,13 @@ kubectl apply -f https://raw.githubusercontent.com/traefik/traefik/v2.10/docs/co
### Traefik Hub
In `v2.10`, Traefik Hub configuration has been removed because Traefik Hub v2 doesn't require this configuration.
## v2.11
### IPWhiteList (HTTP)
In `v2.11`, the `IPWhiteList` middleware is deprecated, please use the [IPAllowList](../middlewares/http/ipallowlist.md) middleware instead.
### IPWhiteList (TCP)
In `v2.11`, the `IPWhiteList` middleware is deprecated, please use the [IPAllowList](../middlewares/tcp/ipallowlist.md) middleware instead.
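A sketch of the replacement with the file provider (the middleware name and source ranges are placeholders):
```yaml tab="File (YAML)"
http:
  middlewares:
    my-allowlist:
      # previously: ipWhiteList
      ipAllowList:
        sourceRange:
          - 127.0.0.1/32
          - 192.168.1.0/24
```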
@ -1,139 +0,0 @@
---
title: "Traefik Datadog Tracing Documentation"
description: "Traefik Proxy supports Datadog for tracing. Read the technical documentation to enable Datadog for observability."
---
# Datadog
To enable the Datadog tracer:
```yaml tab="File (YAML)"
tracing:
datadog: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
```
```bash tab="CLI"
--tracing.datadog=true
```
#### `localAgentHostPort`
_Optional, Default="localhost:8126"_
Local Agent Host Port instructs the reporter to send spans to the Datadog Agent at this address (host:port).
```yaml tab="File (YAML)"
tracing:
datadog:
localAgentHostPort: localhost:8126
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
localAgentHostPort = "localhost:8126"
```
```bash tab="CLI"
--tracing.datadog.localAgentHostPort=localhost:8126
```
#### `localAgentSocket`
_Optional, Default=""_
Local Agent Socket instructs the reporter to send spans to the Datadog Agent at this UNIX socket.
```yaml tab="File (YAML)"
tracing:
datadog:
localAgentSocket: /var/run/datadog/apm.socket
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
localAgentSocket = "/var/run/datadog/apm.socket"
```
```bash tab="CLI"
--tracing.datadog.localAgentSocket=/var/run/datadog/apm.socket
```
#### `debug`
_Optional, Default=false_
Enables Datadog debug.
```yaml tab="File (YAML)"
tracing:
datadog:
debug: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
debug = true
```
```bash tab="CLI"
--tracing.datadog.debug=true
```
#### `globalTags`
_Optional, Default=empty_
Applies a list of shared key:value tags on all spans.
```yaml tab="File (YAML)"
tracing:
datadog:
globalTags:
tag1: foo
tag2: bar
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
[tracing.datadog.globalTags]
tag1 = "foo"
tag2 = "bar"
```
```bash tab="CLI"
--tracing.datadog.globalTags.tag1=foo
--tracing.datadog.globalTags.tag2=bar
```
#### `prioritySampling`
_Optional, Default=false_
Enables priority sampling.
When using distributed tracing,
this option must be enabled in order to get all the parts of a distributed trace sampled.
```yaml tab="File (YAML)"
tracing:
datadog:
prioritySampling: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.datadog]
prioritySampling = true
```
```bash tab="CLI"
--tracing.datadog.prioritySampling=true
```
@ -1,93 +0,0 @@
---
title: "Traefik Elastic Documentation"
description: "Traefik supports several tracing backends, including Elastic. Learn how to implement it for observability in Traefik Proxy. Read the technical documentation."
---
# Elastic
To enable the Elastic tracer:
```yaml tab="File (YAML)"
tracing:
elastic: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.elastic]
```
```bash tab="CLI"
--tracing.elastic=true
```
#### `serverURL`
_Optional, Default="http://localhost:8200"_
URL of the Elastic APM server.
```yaml tab="File (YAML)"
tracing:
elastic:
serverURL: "http://apm:8200"
```
```toml tab="File (TOML)"
[tracing]
[tracing.elastic]
serverURL = "http://apm:8200"
```
```bash tab="CLI"
--tracing.elastic.serverurl="http://apm:8200"
```
#### `secretToken`
_Optional, Default=""_
Token used to connect to Elastic APM Server.
```yaml tab="File (YAML)"
tracing:
elastic:
secretToken: "mytoken"
```
```toml tab="File (TOML)"
[tracing]
[tracing.elastic]
secretToken = "mytoken"
```
```bash tab="CLI"
--tracing.elastic.secrettoken="mytoken"
```
#### `serviceEnvironment`
_Optional, Default=""_
Environment's name where Traefik is deployed in, e.g. `production` or `staging`.
```yaml tab="File (YAML)"
tracing:
elastic:
serviceEnvironment: "production"
```
```toml tab="File (TOML)"
[tracing]
[tracing.elastic]
serviceEnvironment = "production"
```
```bash tab="CLI"
--tracing.elastic.serviceenvironment="production"
```
### Further
Additional configuration of Elastic APM Go agent can be done using environment variables.
See [APM Go agent reference](https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html).
@ -1,176 +0,0 @@
---
title: "Traefik Haystack Documentation"
description: "Traefik supports several tracing backends, including Haystack. Learn how to implement it for observability in Traefik Proxy. Read the technical documentation."
---
# Haystack
To enable the Haystack tracer:
```yaml tab="File (YAML)"
tracing:
haystack: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
```
```bash tab="CLI"
--tracing.haystack=true
```
#### `localAgentHost`
_Required, Default="127.0.0.1"_
Local Agent Host instructs reporter to send spans to the Haystack Agent at this address.
```yaml tab="File (YAML)"
tracing:
haystack:
localAgentHost: 127.0.0.1
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
localAgentHost = "127.0.0.1"
```
```bash tab="CLI"
--tracing.haystack.localAgentHost=127.0.0.1
```
#### `localAgentPort`
_Required, Default=35000_
Local Agent Port instructs reporter to send spans to the Haystack Agent at this port.
```yaml tab="File (YAML)"
tracing:
haystack:
localAgentPort: 35000
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
localAgentPort = 35000
```
```bash tab="CLI"
--tracing.haystack.localAgentPort=35000
```
#### `globalTag`
_Optional, Default=empty_
Applies shared key:value tag on all spans.
```yaml tab="File (YAML)"
tracing:
haystack:
globalTag: sample:test
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
globalTag = "sample:test"
```
```bash tab="CLI"
--tracing.haystack.globalTag=sample:test
```
#### `traceIDHeaderName`
_Optional, Default=empty_
Sets the header name used to store the trace ID.
```yaml tab="File (YAML)"
tracing:
haystack:
traceIDHeaderName: Trace-ID
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
traceIDHeaderName = "Trace-ID"
```
```bash tab="CLI"
--tracing.haystack.traceIDHeaderName=Trace-ID
```
#### `parentIDHeaderName`
_Optional, Default=empty_
Sets the header name used to store the parent ID.
```yaml tab="File (YAML)"
tracing:
haystack:
parentIDHeaderName: Parent-Message-ID
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
parentIDHeaderName = "Parent-Message-ID"
```
```bash tab="CLI"
--tracing.haystack.parentIDHeaderName=Parent-Message-ID
```
#### `spanIDHeaderName`
_Optional, Default=empty_
Sets the header name used to store the span ID.
```yaml tab="File (YAML)"
tracing:
haystack:
spanIDHeaderName: Message-ID
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
spanIDHeaderName = "Message-ID"
```
```bash tab="CLI"
--tracing.haystack.spanIDHeaderName=Message-ID
```
#### `baggagePrefixHeaderName`
_Optional, Default=empty_
Sets the header name prefix used to store baggage items in a map.
```yaml tab="File (YAML)"
tracing:
haystack:
baggagePrefixHeaderName: "sample"
```
```toml tab="File (TOML)"
[tracing]
[tracing.haystack]
baggagePrefixHeaderName = "sample"
```
```bash tab="CLI"
--tracing.haystack.baggagePrefixHeaderName=sample
```
@ -1,117 +0,0 @@
---
title: "Traefik Instana Documentation"
description: "Traefik supports several tracing backends, including Instana. Learn how to implement it for observability in Traefik Proxy. Read the technical documentation."
---
# Instana
To enable the Instana tracer:
```yaml tab="File (YAML)"
tracing:
instana: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.instana]
```
```bash tab="CLI"
--tracing.instana=true
```
#### `localAgentHost`
_Required, Default="127.0.0.1"_
Local Agent Host instructs reporter to send spans to the Instana Agent at this address.
```yaml tab="File (YAML)"
tracing:
instana:
localAgentHost: 127.0.0.1
```
```toml tab="File (TOML)"
[tracing]
[tracing.instana]
localAgentHost = "127.0.0.1"
```
```bash tab="CLI"
--tracing.instana.localAgentHost=127.0.0.1
```
#### `localAgentPort`
_Required, Default=42699_
Local Agent port instructs reporter to send spans to the Instana Agent listening on this port.
```yaml tab="File (YAML)"
tracing:
instana:
localAgentPort: 42699
```
```toml tab="File (TOML)"
[tracing]
[tracing.instana]
localAgentPort = 42699
```
```bash tab="CLI"
--tracing.instana.localAgentPort=42699
```
#### `logLevel`
_Required, Default="info"_
Sets Instana tracer log level.
Valid values are:
- `error`
- `warn`
- `debug`
- `info`
```yaml tab="File (YAML)"
tracing:
instana:
logLevel: info
```
```toml tab="File (TOML)"
[tracing]
[tracing.instana]
logLevel = "info"
```
```bash tab="CLI"
--tracing.instana.logLevel=info
```
#### `enableAutoProfile`
_Required, Default=false_
Enables [automatic profiling](https://www.ibm.com/docs/en/obi/current?topic=instana-profile-processes) for the Traefik process.
```yaml tab="File (YAML)"
tracing:
instana:
enableAutoProfile: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.instana]
enableAutoProfile = true
```
```bash tab="CLI"
--tracing.instana.enableAutoProfile=true
```
@ -1,294 +0,0 @@
---
title: "Traefik Jaeger Documentation"
description: "Traefik supports several tracing backends, including Jaeger. Learn how to implement it for observability in Traefik Proxy. Read the technical documentation."
---
# Jaeger
To enable the Jaeger tracer:
```yaml tab="File (YAML)"
tracing:
jaeger: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
```
```bash tab="CLI"
--tracing.jaeger=true
```
!!! warning
Traefik is able to send data over the compact thrift protocol to the [Jaeger agent](https://www.jaegertracing.io/docs/deployment/#agent)
or a [Jaeger collector](https://www.jaegertracing.io/docs/deployment/#collector).
!!! info
All Jaeger configuration can be overridden by [environment variables](https://github.com/jaegertracing/jaeger-client-go#environment-variables)
#### `samplingServerURL`
_Required, Default="http://localhost:5778/sampling"_
Address of the Jaeger Agent HTTP sampling server.
```yaml tab="File (YAML)"
tracing:
jaeger:
samplingServerURL: http://localhost:5778/sampling
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
samplingServerURL = "http://localhost:5778/sampling"
```
```bash tab="CLI"
--tracing.jaeger.samplingServerURL=http://localhost:5778/sampling
```
#### `samplingType`
_Required, Default="const"_
Type of the sampler.
Valid values are:
- `const`
- `probabilistic`
- `rateLimiting`
```yaml tab="File (YAML)"
tracing:
jaeger:
samplingType: const
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
samplingType = "const"
```
```bash tab="CLI"
--tracing.jaeger.samplingType=const
```
#### `samplingParam`
_Required, Default=1.0_
Value passed to the sampler.
Valid values are:
- for `const` sampler, 0 or 1 for always false/true respectively
- for `probabilistic` sampler, a probability between 0 and 1
- for `rateLimiting` sampler, the number of spans per second
```yaml tab="File (YAML)"
tracing:
jaeger:
samplingParam: 1.0
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
samplingParam = 1.0
```
```bash tab="CLI"
--tracing.jaeger.samplingParam=1.0
```
#### `localAgentHostPort`
_Required, Default="127.0.0.1:6831"_
Local Agent Host Port instructs the reporter to send spans to the Jaeger Agent at this address (host:port).
```yaml tab="File (YAML)"
tracing:
jaeger:
localAgentHostPort: 127.0.0.1:6831
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
localAgentHostPort = "127.0.0.1:6831"
```
```bash tab="CLI"
--tracing.jaeger.localAgentHostPort=127.0.0.1:6831
```
#### `gen128Bit`
_Optional, Default=false_
Generates 128 bits trace IDs, compatible with OpenCensus.
```yaml tab="File (YAML)"
tracing:
jaeger:
gen128Bit: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
gen128Bit = true
```
```bash tab="CLI"
--tracing.jaeger.gen128Bit
```
#### `propagation`
_Required, Default="jaeger"_
Sets the propagation header type.
Valid values are:
- `jaeger`, jaeger's default trace header.
- `b3`, compatible with OpenZipkin
```yaml tab="File (YAML)"
tracing:
jaeger:
propagation: jaeger
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
propagation = "jaeger"
```
```bash tab="CLI"
--tracing.jaeger.propagation=jaeger
```
#### `traceContextHeaderName`
_Required, Default="uber-trace-id"_
HTTP header name used to propagate tracing context.
This must be in lower-case to avoid mismatches when decoding incoming headers.
```yaml tab="File (YAML)"
tracing:
jaeger:
traceContextHeaderName: uber-trace-id
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
traceContextHeaderName = "uber-trace-id"
```
```bash tab="CLI"
--tracing.jaeger.traceContextHeaderName=uber-trace-id
```
### disableAttemptReconnecting
_Optional, Default=true_
Disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change.
Enabling the re-resolving of UDP address make the client more robust in Kubernetes deployments.
```yaml tab="File (YAML)"
tracing:
jaeger:
disableAttemptReconnecting: false
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger]
disableAttemptReconnecting = false
```
```bash tab="CLI"
--tracing.jaeger.disableAttemptReconnecting=false
```
### `collector`
#### `endpoint`
_Optional, Default=""_
Collector Endpoint instructs the reporter to send spans to the Jaeger Collector at this URL.
```yaml tab="File (YAML)"
tracing:
jaeger:
collector:
endpoint: http://127.0.0.1:14268/api/traces?format=jaeger.thrift
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger.collector]
endpoint = "http://127.0.0.1:14268/api/traces?format=jaeger.thrift"
```
```bash tab="CLI"
--tracing.jaeger.collector.endpoint=http://127.0.0.1:14268/api/traces?format=jaeger.thrift
```
#### `user`
_Optional, Default=""_
User instructs the reporter to include a user for basic HTTP authentication when sending spans to the Jaeger Collector.
```yaml tab="File (YAML)"
tracing:
jaeger:
collector:
user: my-user
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger.collector]
user = "my-user"
```
```bash tab="CLI"
--tracing.jaeger.collector.user=my-user
```
#### `password`
_Optional, Default=""_
Password instructs the reporter to include a password for basic HTTP authentication when sending spans to the Jaeger Collector.
```yaml tab="File (YAML)"
tracing:
jaeger:
collector:
password: my-password
```
```toml tab="File (TOML)"
[tracing]
[tracing.jaeger.collector]
password = "my-password"
```
```bash tab="CLI"
--tracing.jaeger.collector.password=my-password
```
@ -9,122 +9,75 @@ To enable the OpenTelemetry tracer:
```yaml tab="File (YAML)"
tracing:
openTelemetry: {}
otlp: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry]
[tracing.otlp]
```
```bash tab="CLI"
--tracing.openTelemetry=true
--tracing.otlp=true
```
!!! info "The OpenTelemetry trace reporter will export traces to the collector using HTTP by default, see the [gRPC Section](#grpc-configuration) to use gRPC."
!!! info "The OpenTelemetry trace reporter will export traces to the collector using HTTP by default (http://localhost:4318/v1/traces),
see the [gRPC Section](#grpc-configuration) to use gRPC."
!!! info "Trace sampling"
By default, the OpenTelemetry trace reporter will sample 100% of traces.
See [OpenTelemetry's SDK configuration](https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/#general-sdk-configuration) to customize the sampling strategy.
#### `address`
### HTTP configuration
_Required, Default="localhost:4318", Format="`<host>:<port>`"_
_Optional_
Address of the OpenTelemetry Collector to send spans to.
This instructs the reporter to send spans to the OpenTelemetry Collector using HTTP.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
address: localhost:4318
otlp:
http: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry]
address = "localhost:4318"
[tracing.otlp.http]
```
```bash tab="CLI"
--tracing.openTelemetry.address=localhost:4318
--tracing.otlp.http=true
```
#### `headers`
#### `endpoint`
_Optional, Default={}_
_Required, Default="http://localhost:4318/v1/traces", Format="`<scheme>://<host>:<port><path>`"_
Additional headers sent with spans by the reporter to the OpenTelemetry Collector.
URL of the OpenTelemetry Collector to send spans to.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
headers:
foo: bar
baz: buz
otlp:
http:
endpoint: http://localhost:4318/v1/traces
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry.headers]
foo = "bar"
baz = "buz"
[tracing.otlp.http]
endpoint = "http://localhost:4318/v1/traces"
```
```bash tab="CLI"
--tracing.openTelemetry.headers.foo=bar --tracing.openTelemetry.headers.baz=buz
```
#### `insecure`
_Optional, Default=false_
Allows reporter to send spans to the OpenTelemetry Collector without using a secured protocol.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
insecure: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry]
insecure = true
```
```bash tab="CLI"
--tracing.openTelemetry.insecure=true
```
#### `path`
_Required, Default="/v1/traces"_
Allows to override the default URL path used for sending traces.
This option has no effect when using gRPC transport.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
path: /foo/v1/traces
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry]
path = "/foo/v1/traces"
```
```bash tab="CLI"
--tracing.openTelemetry.path=/foo/v1/traces
--tracing.otlp.http.endpoint=http://localhost:4318/v1/traces
```
#### `tls`
_Optional_
Defines the TLS configuration used by the reporter to send spans to the OpenTelemetry Collector.
Defines the Client TLS configuration used by the reporter to send spans to the OpenTelemetry Collector.
##### `ca`
@ -135,18 +88,19 @@ it defaults to the system bundle.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
otlp:
http:
tls:
ca: path/to/ca.crt
```
```toml tab="File (TOML)"
[tracing.openTelemetry.tls]
[tracing.otlp.http.tls]
ca = "path/to/ca.crt"
```
```bash tab="CLI"
--tracing.openTelemetry.tls.ca=path/to/ca.crt
--tracing.otlp.http.tls.ca=path/to/ca.crt
```
##### `cert`
@ -158,21 +112,22 @@ When using this option, setting the `key` option is required.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
otlp:
http:
tls:
cert: path/to/foo.cert
key: path/to/foo.key
```
```toml tab="File (TOML)"
[tracing.openTelemetry.tls]
[tracing.otlp.http.tls]
cert = "path/to/foo.cert"
key = "path/to/foo.key"
```
```bash tab="CLI"
--tracing.openTelemetry.tls.cert=path/to/foo.cert
--tracing.openTelemetry.tls.key=path/to/foo.key
--tracing.otlp.http.tls.cert=path/to/foo.cert
--tracing.otlp.http.tls.key=path/to/foo.key
```
##### `key`
@ -184,21 +139,22 @@ When using this option, setting the `cert` option is required.
```yaml tab="File (YAML)"
tracing:
openTelemetry:
otlp:
http:
tls:
cert: path/to/foo.cert
key: path/to/foo.key
```
```toml tab="File (TOML)"
[tracing.openTelemetry.tls]
[tracing.otlp.http.tls]
cert = "path/to/foo.cert"
key = "path/to/foo.key"
```
```bash tab="CLI"
--tracing.openTelemetry.tls.cert=path/to/foo.cert
--tracing.openTelemetry.tls.key=path/to/foo.key
--tracing.otlp.http.tls.cert=path/to/foo.cert
--tracing.otlp.http.tls.key=path/to/foo.key
```
##### `insecureSkipVerify`
@ -210,18 +166,19 @@ the TLS connection to the OpenTelemetry Collector accepts any certificate presen
```yaml tab="File (YAML)"
tracing:
openTelemetry:
otlp:
http:
tls:
insecureSkipVerify: true
```
```toml tab="File (TOML)"
[tracing.openTelemetry.tls]
[tracing.otlp.http.tls]
insecureSkipVerify = true
```
```bash tab="CLI"
--tracing.openTelemetry.tls.insecureSkipVerify=true
--tracing.otlp.http.tls.insecureSkipVerify=true
```
#### gRPC configuration
@ -232,15 +189,168 @@ This instructs the reporter to send spans to the OpenTelemetry Collector using g
```yaml tab="File (YAML)"
tracing:
openTelemetry:
otlp:
grpc: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.openTelemetry.grpc]
[tracing.otlp.grpc]
```
```bash tab="CLI"
--tracing.openTelemetry.grpc=true
--tracing.otlp.grpc=true
```
#### `endpoint`
_Required, Default="localhost:4317", Format="`<host>:<port>`"_
Address of the OpenTelemetry Collector to send spans to.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
endpoint: localhost:4317
```
```toml tab="File (TOML)"
[tracing]
[tracing.otlp.grpc]
endpoint = "localhost:4317"
```
```bash tab="CLI"
--tracing.otlp.grpc.endpoint=localhost:4317
```
#### `insecure`
_Optional, Default=false_
Allows reporter to send spans to the OpenTelemetry Collector without using a secured protocol.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
insecure: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.otlp.grpc]
insecure = true
```
```bash tab="CLI"
--tracing.otlp.grpc.insecure=true
```
#### `tls`
_Optional_
Defines the Client TLS configuration used by the reporter to send spans to the OpenTelemetry Collector.
##### `ca`
_Optional_
`ca` is the path to the certificate authority used for the secure connection to the OpenTelemetry Collector,
it defaults to the system bundle.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
tls:
ca: path/to/ca.crt
```
```toml tab="File (TOML)"
[tracing.otlp.grpc.tls]
ca = "path/to/ca.crt"
```
```bash tab="CLI"
--tracing.otlp.grpc.tls.ca=path/to/ca.crt
```
##### `cert`
_Optional_
`cert` is the path to the public certificate used for the secure connection to the OpenTelemetry Collector.
When using this option, setting the `key` option is required.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
tls:
cert: path/to/foo.cert
key: path/to/foo.key
```
```toml tab="File (TOML)"
[tracing.otlp.grpc.tls]
cert = "path/to/foo.cert"
key = "path/to/foo.key"
```
```bash tab="CLI"
--tracing.otlp.grpc.tls.cert=path/to/foo.cert
--tracing.otlp.grpc.tls.key=path/to/foo.key
```
##### `key`
_Optional_
`key` is the path to the private key used for the secure connection to the OpenTelemetry Collector.
When using this option, setting the `cert` option is required.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
tls:
cert: path/to/foo.cert
key: path/to/foo.key
```
```toml tab="File (TOML)"
[tracing.otlp.grpc.tls]
cert = "path/to/foo.cert"
key = "path/to/foo.key"
```
```bash tab="CLI"
--tracing.otlp.grpc.tls.cert=path/to/foo.cert
--tracing.otlp.grpc.tls.key=path/to/foo.key
```
##### `insecureSkipVerify`
_Optional, Default=false_
If `insecureSkipVerify` is `true`,
the TLS connection to the OpenTelemetry Collector accepts any certificate presented by the server regardless of the hostnames it covers.
```yaml tab="File (YAML)"
tracing:
otlp:
grpc:
tls:
insecureSkipVerify: true
```
```toml tab="File (TOML)"
[tracing.otlp.grpc.tls]
insecureSkipVerify = true
```
```bash tab="CLI"
--tracing.otlp.grpc.tls.insecureSkipVerify=true
```
@ -10,21 +10,13 @@ Visualize the Requests Flow
The tracing system allows developers to visualize call flows in their infrastructure.
Traefik uses OpenTracing, an open standard designed for distributed tracing.
Traefik uses [OpenTelemetry](https://opentelemetry.io/ "Link to website of OTel"), an open standard designed for distributed tracing.
Traefik supports seven tracing backends:
Please check our dedicated [OTel docs](./opentelemetry.md) to learn more.
- [Jaeger](./jaeger.md)
- [Zipkin](./zipkin.md)
- [Datadog](./datadog.md)
- [Instana](./instana.md)
- [Haystack](./haystack.md)
- [Elastic](./elastic.md)
- [OpenTelemetry](./opentelemetry.md)
## Configuration
By default, Traefik uses Jaeger as tracing backend.
To enable the tracing:
@ -62,25 +54,71 @@ tracing:
--tracing.serviceName=traefik
```
#### `spanNameLimit`
#### `sampleRate`
_Required, Default=0_
_Optional, Default=1.0_
Span name limit allows for name truncation in case of very long names.
This can prevent certain tracing providers to drop traces that exceed their length limits.
`0` means no truncation will occur.
The proportion of requests to trace, specified between 0.0 and 1.0.
```yaml tab="File (YAML)"
tracing:
spanNameLimit: 150
sampleRate: 0.2
```
```toml tab="File (TOML)"
[tracing]
spanNameLimit = 150
sampleRate = 0.2
```
```bash tab="CLI"
--tracing.spanNameLimit=150
--tracing.sampleRate=0.2
```
#### `headers`
_Optional, Default={}_
Defines additional headers to be sent with the span's payload.
```yaml tab="File (YAML)"
tracing:
headers:
foo: bar
baz: buz
```
```toml tab="File (TOML)"
[tracing]
[tracing.headers]
foo = "bar"
baz = "buz"
```
```bash tab="CLI"
--tracing.headers.foo=bar --tracing.headers.baz=buz
```
#### `globalAttributes`
_Optional, Default=empty_
Applies a list of shared key:value attributes on all spans.
```yaml tab="File (YAML)"
tracing:
globalAttributes:
attr1: foo
attr2: bar
```
```toml tab="File (TOML)"
[tracing]
[tracing.globalAttributes]
attr1 = "foo"
attr2 = "bar"
```
```bash tab="CLI"
--tracing.globalAttributes.attr1=foo
--tracing.globalAttributes.attr2=bar
```
@ -1,110 +0,0 @@
---
title: "Traefik Zipkin Documentation"
description: "Traefik supports several tracing backends, including Zipkin. Learn how to implement it for observability in Traefik Proxy. Read the technical documentation."
---
# Zipkin
To enable the Zipkin tracer:
```yaml tab="File (YAML)"
tracing:
zipkin: {}
```
```toml tab="File (TOML)"
[tracing]
[tracing.zipkin]
```
```bash tab="CLI"
--tracing.zipkin=true
```
#### `httpEndpoint`
_Required, Default="http://localhost:9411/api/v2/spans"_
HTTP endpoint used to send data.
```yaml tab="File (YAML)"
tracing:
zipkin:
httpEndpoint: http://localhost:9411/api/v2/spans
```
```toml tab="File (TOML)"
[tracing]
[tracing.zipkin]
httpEndpoint = "http://localhost:9411/api/v2/spans"
```
```bash tab="CLI"
--tracing.zipkin.httpEndpoint=http://localhost:9411/api/v2/spans
```
#### `sameSpan`
_Optional, Default=false_
Uses SameSpan RPC style traces.
```yaml tab="File (YAML)"
tracing:
zipkin:
sameSpan: true
```
```toml tab="File (TOML)"
[tracing]
[tracing.zipkin]
sameSpan = true
```
```bash tab="CLI"
--tracing.zipkin.sameSpan=true
```
#### `id128Bit`
_Optional, Default=true_
Uses 128 bits trace IDs.
```yaml tab="File (YAML)"
tracing:
zipkin:
id128Bit: false
```
```toml tab="File (TOML)"
[tracing]
[tracing.zipkin]
id128Bit = false
```
```bash tab="CLI"
--tracing.zipkin.id128Bit=false
```
#### `sampleRate`
_Required, Default=1.0_
The proportion of requests to trace, specified between 0.0 and 1.0.
```yaml tab="File (YAML)"
tracing:
zipkin:
sampleRate: 0.2
```
```toml tab="File (TOML)"
[tracing]
[tracing.zipkin]
sampleRate = 0.2
```
```bash tab="CLI"
--tracing.zipkin.sampleRate=0.2
```
@ -75,7 +75,7 @@ to allow defining:
[forwardAuth](../middlewares/http/forwardauth.md)) or [allowlisting](../middlewares/http/ipallowlist.md).
- A [router rule](#dashboard-router-rule) for accessing the dashboard,
through Traefik itself (sometimes referred as "Traefik-ception").
through Traefik itself (sometimes referred to as "Traefik-ception").
### Dashboard Router Rule
@ -83,7 +83,7 @@ As underlined in the [documentation for the `api.dashboard` option](./api.md#das
the [router rule](../routing/routers/index.md#rule) defined for Traefik must match
the path prefixes `/api` and `/dashboard`.
We recommend to use a "Host Based rule" as ```Host(`traefik.example.com`)``` to match everything on the host domain,
We recommend using a "Host Based rule" as ```Host(`traefik.example.com`)``` to match everything on the host domain,
or to make sure that the defined rule captures both prefixes:
```bash tab="Host Rule"
@ -33,7 +33,7 @@ whose default value is `traefik` (port `8080`).
| Path | Method | Description |
|---------|---------------|-----------------------------------------------------------------------------------------------------|
| `/ping` | `GET`, `HEAD` | A simple endpoint to check for Traefik process liveness. Return a code `200` with the content: `OK` |
| `/ping` | `GET`, `HEAD` | An endpoint to check for Traefik process liveness. Return a code `200` with the content: `OK` |
!!! note
The `cli` comes with a [`healthcheck`](./cli.md#healthcheck) command which can be used for calling this endpoint.
@ -92,10 +92,11 @@ ping:
_Optional, Default=503_
During the period in which Traefik is gracefully shutting down, the ping handler
returns a 503 status code by default. If Traefik is behind e.g. a load-balancer
returns a `503` status code by default.
If Traefik is behind, for example a load-balancer
doing health checks (such as the Kubernetes LivenessProbe), another code might
be expected as the signal for graceful termination. In which case, the
terminatingStatusCode can be used to set the code returned by the ping
be expected as the signal for graceful termination.
In that case, the terminatingStatusCode can be used to set the code returned by the ping
handler during termination.
```yaml tab="File (YAML)"
@ -163,7 +163,7 @@ See the [Docker API Access](#docker-api-access) section for more information.
services:
traefik:
image: traefik:v3.0 # The official v2 Traefik docker image
image: traefik:v3.0 # The official v3 Traefik docker image
ports:
- "80:80"
volumes:
@ -80,17 +80,13 @@ This provider is proposed as an experimental feature and partially supports the
The Kubernetes Gateway API project provides several guides on how to use the APIs.
These guides can help you to go further than the example above.
The [getting started guide](https://gateway-api.sigs.k8s.io/v1alpha2/guides/) details how to install the CRDs from their repository.
!!! note ""
Keep in mind that the Traefik Gateway provider only supports the `v0.4.0` (v1alpha2).
The [getting started guide](https://gateway-api.sigs.k8s.io/guides/) details how to install the CRDs from their repository.
For now, the Traefik Gateway Provider can be used while following the below guides:
* [Simple Gateway](https://gateway-api.sigs.k8s.io/v1alpha2/guides/simple-gateway/)
* [HTTP routing](https://gateway-api.sigs.k8s.io/v1alpha2/guides/http-routing/)
* [TLS](https://gateway-api.sigs.k8s.io/v1alpha2/guides/tls/)
* [Simple Gateway](https://gateway-api.sigs.k8s.io/guides/simple-gateway/)
* [HTTP routing](https://gateway-api.sigs.k8s.io/guides/http-routing/)
* [TLS](https://gateway-api.sigs.k8s.io/guides/tls/)
## Resource Configuration
@ -229,3 +229,166 @@ providers:
```bash tab="CLI"
--providers.redis.tls.insecureSkipVerify=true
```
### `sentinel`
_Optional_
Defines the Sentinel configuration used to interact with Redis Sentinel.
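As an overall sketch, the options below are typically combined with the provider `endpoints` pointing at the Sentinel instances (addresses and master name are placeholders, and this pairing is an assumption rather than a complete reference):
```yaml tab="File (YAML)"
providers:
  redis:
    endpoints:
      - "sentinel-0:26379"
      - "sentinel-1:26379"
    sentinel:
      masterName: my-master
```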
#### `masterName`
_Required_
`masterName` is the name of the Sentinel master.
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
masterName: my-master
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
masterName = "my-master"
```
```bash tab="CLI"
--providers.redis.sentinel.masterName=my-master
```
#### `username`
_Optional_
`username` is the username for Sentinel authentication.
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
username: user
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
username = "user"
```
```bash tab="CLI"
--providers.redis.sentinel.username=user
```
#### `password`
_Optional_
`password` is the password for Sentinel authentication.
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
password: password
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
password = "password"
```
```bash tab="CLI"
--providers.redis.sentinel.password=password
```
#### `latencyStrategy`
_Optional, Default=false_
`latencyStrategy` defines whether to route commands to the closest master or replica nodes
(mutually exclusive with `randomStrategy` and `replicaStrategy`).
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
latencyStrategy: true
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
latencyStrategy = true
```
```bash tab="CLI"
--providers.redis.sentinel.latencyStrategy=true
```
#### `randomStrategy`
_Optional, Default=false_
`randomStrategy` defines whether to route commands randomly to master or replica nodes
(mutually exclusive with `latencyStrategy` and `replicaStrategy`).
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
randomStrategy: true
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
randomStrategy = true
```
```bash tab="CLI"
--providers.redis.sentinel.randomStrategy=true
```
#### `replicaStrategy`
_Optional, Default=false_
`replicaStrategy` defines whether to route all commands to replica nodes
(mutually exclusive with `latencyStrategy` and `randomStrategy`).
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
replicaStrategy: true
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
replicaStrategy = true
```
```bash tab="CLI"
--providers.redis.sentinel.replicaStrategy=true
```
#### `useDisconnectedReplicas`
_Optional, Default=false_
`useDisconnectedReplicas` defines whether to use replicas that are disconnected from the master when no connected replicas are available.
```yaml tab="File (YAML)"
providers:
redis:
sentinel:
useDisconnectedReplicas: true
```
```toml tab="File (TOML)"
[providers.redis.sentinel]
useDisconnectedReplicas = true
```
```bash tab="CLI"
--providers.redis.sentinel.useDisconnectedReplicas=true
```
@ -65,56 +65,60 @@
- "traefik.http.middlewares.middleware10.headers.stsincludesubdomains=true"
- "traefik.http.middlewares.middleware10.headers.stspreload=true"
- "traefik.http.middlewares.middleware10.headers.stsseconds=42"
- "traefik.http.middlewares.middleware11.ipallowlist.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware11.ipallowlist.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware11.ipallowlist.sourcerange=foobar, foobar"
- "traefik.http.middlewares.middleware12.inflightreq.amount=42"
- "traefik.http.middlewares.middleware12.inflightreq.sourcecriterion.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware12.inflightreq.sourcecriterion.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware12.inflightreq.sourcecriterion.requestheadername=foobar"
- "traefik.http.middlewares.middleware12.inflightreq.sourcecriterion.requesthost=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.commonname=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.country=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.domaincomponent=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.locality=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.organization=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.province=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.issuer.serialnumber=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.notafter=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.notbefore=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.sans=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.serialnumber=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.commonname=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.country=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.domaincomponent=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.locality=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.organization=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.organizationalunit=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.province=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.info.subject.serialnumber=true"
- "traefik.http.middlewares.middleware13.passtlsclientcert.pem=true"
- "traefik.http.middlewares.middleware14.plugin.foobar.foo=bar"
- "traefik.http.middlewares.middleware15.ratelimit.average=42"
- "traefik.http.middlewares.middleware15.ratelimit.burst=42"
- "traefik.http.middlewares.middleware15.ratelimit.period=42"
- "traefik.http.middlewares.middleware15.ratelimit.sourcecriterion.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware15.ratelimit.sourcecriterion.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware15.ratelimit.sourcecriterion.requestheadername=foobar"
- "traefik.http.middlewares.middleware15.ratelimit.sourcecriterion.requesthost=true"
- "traefik.http.middlewares.middleware16.redirectregex.permanent=true"
- "traefik.http.middlewares.middleware16.redirectregex.regex=foobar"
- "traefik.http.middlewares.middleware16.redirectregex.replacement=foobar"
- "traefik.http.middlewares.middleware17.redirectscheme.permanent=true"
- "traefik.http.middlewares.middleware17.redirectscheme.port=foobar"
- "traefik.http.middlewares.middleware17.redirectscheme.scheme=foobar"
- "traefik.http.middlewares.middleware18.replacepath.path=foobar"
- "traefik.http.middlewares.middleware19.replacepathregex.regex=foobar"
- "traefik.http.middlewares.middleware19.replacepathregex.replacement=foobar"
- "traefik.http.middlewares.middleware20.retry.attempts=42"
- "traefik.http.middlewares.middleware20.retry.initialinterval=42"
- "traefik.http.middlewares.middleware21.stripprefix.prefixes=foobar, foobar"
- "traefik.http.middlewares.middleware22.stripprefixregex.regex=foobar, foobar"
- "traefik.http.middlewares.middleware23.grpcweb.alloworigins=foobar, foobar"
- "traefik.http.middlewares.middleware11.ipwhitelist.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware11.ipwhitelist.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware12.ipwhitelist.sourcerange=foobar, foobar"
- "traefik.http.middlewares.middleware12.ipallowlist.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware12.ipallowlist.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware12.ipallowlist.sourcerange=foobar, foobar"
- "traefik.http.middlewares.middleware12.ipallowlist.rejectstatuscode=404"
- "traefik.http.middlewares.middleware13.inflightreq.amount=42"
- "traefik.http.middlewares.middleware13.inflightreq.sourcecriterion.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware13.inflightreq.sourcecriterion.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware13.inflightreq.sourcecriterion.requestheadername=foobar"
- "traefik.http.middlewares.middleware13.inflightreq.sourcecriterion.requesthost=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.commonname=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.country=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.domaincomponent=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.locality=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.organization=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.province=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.issuer.serialnumber=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.notafter=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.notbefore=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.sans=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.serialnumber=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.commonname=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.country=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.domaincomponent=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.locality=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.organization=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.organizationalunit=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.province=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.info.subject.serialnumber=true"
- "traefik.http.middlewares.middleware14.passtlsclientcert.pem=true"
- "traefik.http.middlewares.middleware15.plugin.foobar.foo=bar"
- "traefik.http.middlewares.middleware16.ratelimit.average=42"
- "traefik.http.middlewares.middleware16.ratelimit.burst=42"
- "traefik.http.middlewares.middleware16.ratelimit.period=42"
- "traefik.http.middlewares.middleware16.ratelimit.sourcecriterion.ipstrategy.depth=42"
- "traefik.http.middlewares.middleware16.ratelimit.sourcecriterion.ipstrategy.excludedips=foobar, foobar"
- "traefik.http.middlewares.middleware16.ratelimit.sourcecriterion.requestheadername=foobar"
- "traefik.http.middlewares.middleware16.ratelimit.sourcecriterion.requesthost=true"
- "traefik.http.middlewares.middleware17.redirectregex.permanent=true"
- "traefik.http.middlewares.middleware17.redirectregex.regex=foobar"
- "traefik.http.middlewares.middleware17.redirectregex.replacement=foobar"
- "traefik.http.middlewares.middleware18.redirectscheme.permanent=true"
- "traefik.http.middlewares.middleware18.redirectscheme.port=foobar"
- "traefik.http.middlewares.middleware18.redirectscheme.scheme=foobar"
- "traefik.http.middlewares.middleware19.replacepath.path=foobar"
- "traefik.http.middlewares.middleware20.replacepathregex.regex=foobar"
- "traefik.http.middlewares.middleware20.replacepathregex.replacement=foobar"
- "traefik.http.middlewares.middleware21.retry.attempts=42"
- "traefik.http.middlewares.middleware21.retry.initialinterval=42"
- "traefik.http.middlewares.middleware22.stripprefix.prefixes=foobar, foobar"
- "traefik.http.middlewares.middleware23.stripprefixregex.regex=foobar, foobar"
- "traefik.http.middlewares.middleware24.grpcweb.alloworigins=foobar, foobar"
- "traefik.http.routers.router0.entrypoints=foobar, foobar"
- "traefik.http.routers.router0.middlewares=foobar, foobar"
- "traefik.http.routers.router0.priority=42"
@ -161,8 +165,9 @@
- "traefik.http.services.service01.loadbalancer.sticky.cookie.secure=true"
- "traefik.http.services.service01.loadbalancer.server.port=foobar"
- "traefik.http.services.service01.loadbalancer.server.scheme=foobar"
- "traefik.tcp.middlewares.tcpmiddleware00.ipallowlist.sourcerange=foobar, foobar"
- "traefik.tcp.middlewares.tcpmiddleware01.inflightconn.amount=42"
- "traefik.tcp.middlewares.tcpmiddleware00.inflightconn.amount=42"
- "traefik.tcp.middlewares.tcpmiddleware01.ipwhitelist.sourcerange=foobar, foobar"
- "traefik.tcp.middlewares.tcpmiddleware02.ipallowlist.sourcerange=foobar, foobar"
- "traefik.tcp.routers.tcprouter0.entrypoints=foobar, foobar"
- "traefik.tcp.routers.tcprouter0.middlewares=foobar, foobar"
- "traefik.tcp.routers.tcprouter0.rule=foobar"
@ -197,29 +197,36 @@
name0 = "foobar"
name1 = "foobar"
[http.middlewares.Middleware11]
[http.middlewares.Middleware11.ipAllowList]
[http.middlewares.Middleware11.ipWhiteList]
sourceRange = ["foobar", "foobar"]
[http.middlewares.Middleware11.ipAllowList.ipStrategy]
[http.middlewares.Middleware11.ipWhiteList.ipStrategy]
depth = 42
excludedIPs = ["foobar", "foobar"]
[http.middlewares.Middleware12]
[http.middlewares.Middleware12.inFlightReq]
amount = 42
[http.middlewares.Middleware12.inFlightReq.sourceCriterion]
requestHeaderName = "foobar"
requestHost = true
[http.middlewares.Middleware12.inFlightReq.sourceCriterion.ipStrategy]
[http.middlewares.Middleware12.ipAllowList]
sourceRange = ["foobar", "foobar"]
rejectStatusCode = 404
[http.middlewares.Middleware12.ipAllowList.ipStrategy]
depth = 42
excludedIPs = ["foobar", "foobar"]
[http.middlewares.Middleware13]
[http.middlewares.Middleware13.passTLSClientCert]
[http.middlewares.Middleware13.inFlightReq]
amount = 42
[http.middlewares.Middleware13.inFlightReq.sourceCriterion]
requestHeaderName = "foobar"
requestHost = true
[http.middlewares.Middleware13.inFlightReq.sourceCriterion.ipStrategy]
depth = 42
excludedIPs = ["foobar", "foobar"]
[http.middlewares.Middleware14]
[http.middlewares.Middleware14.passTLSClientCert]
pem = true
[http.middlewares.Middleware13.passTLSClientCert.info]
[http.middlewares.Middleware14.passTLSClientCert.info]
notAfter = true
notBefore = true
sans = true
serialNumber = true
[http.middlewares.Middleware13.passTLSClientCert.info.subject]
[http.middlewares.Middleware14.passTLSClientCert.info.subject]
country = true
province = true
locality = true
@ -228,7 +235,7 @@
commonName = true
serialNumber = true
domainComponent = true
[http.middlewares.Middleware13.passTLSClientCert.info.issuer]
[http.middlewares.Middleware14.passTLSClientCert.info.issuer]
country = true
province = true
locality = true
@ -236,50 +243,50 @@
commonName = true
serialNumber = true
domainComponent = true
[http.middlewares.Middleware14]
[http.middlewares.Middleware14.plugin]
[http.middlewares.Middleware14.plugin.PluginConf]
foo = "bar"
[http.middlewares.Middleware15]
[http.middlewares.Middleware15.rateLimit]
[http.middlewares.Middleware15.plugin]
[http.middlewares.Middleware15.plugin.PluginConf]
foo = "bar"
[http.middlewares.Middleware16]
[http.middlewares.Middleware16.rateLimit]
average = 42
period = "42s"
burst = 42
[http.middlewares.Middleware15.rateLimit.sourceCriterion]
[http.middlewares.Middleware16.rateLimit.sourceCriterion]
requestHeaderName = "foobar"
requestHost = true
[http.middlewares.Middleware15.rateLimit.sourceCriterion.ipStrategy]
[http.middlewares.Middleware16.rateLimit.sourceCriterion.ipStrategy]
depth = 42
excludedIPs = ["foobar", "foobar"]
[http.middlewares.Middleware16]
[http.middlewares.Middleware16.redirectRegex]
[http.middlewares.Middleware17]
[http.middlewares.Middleware17.redirectRegex]
regex = "foobar"
replacement = "foobar"
permanent = true
[http.middlewares.Middleware17]
[http.middlewares.Middleware17.redirectScheme]
[http.middlewares.Middleware18]
[http.middlewares.Middleware18.redirectScheme]
scheme = "foobar"
port = "foobar"
permanent = true
[http.middlewares.Middleware18]
[http.middlewares.Middleware18.replacePath]
path = "foobar"
[http.middlewares.Middleware19]
[http.middlewares.Middleware19.replacePathRegex]
[http.middlewares.Middleware19.replacePath]
path = "foobar"
[http.middlewares.Middleware20]
[http.middlewares.Middleware20.replacePathRegex]
regex = "foobar"
replacement = "foobar"
[http.middlewares.Middleware20]
[http.middlewares.Middleware20.retry]
[http.middlewares.Middleware21]
[http.middlewares.Middleware21.retry]
attempts = 42
initialInterval = "42s"
[http.middlewares.Middleware21]
[http.middlewares.Middleware21.stripPrefix]
prefixes = ["foobar", "foobar"]
[http.middlewares.Middleware22]
[http.middlewares.Middleware22.stripPrefixRegex]
regex = ["foobar", "foobar"]
[http.middlewares.Middleware22.stripPrefix]
prefixes = ["foobar", "foobar"]
[http.middlewares.Middleware23]
[http.middlewares.Middleware23.grpcWeb]
[http.middlewares.Middleware23.stripPrefixRegex]
regex = ["foobar", "foobar"]
[http.middlewares.Middleware24]
[http.middlewares.Middleware24.grpcWeb]
allowOrigins = ["foobar", "foobar"]
[http.serversTransports]
[http.serversTransports.ServersTransport0]
@ -224,7 +224,7 @@ http:
permissionsPolicy: foobar
isDevelopment: true
Middleware11:
ipAllowList:
ipWhiteList:
sourceRange:
- foobar
- foobar
@ -234,6 +234,17 @@ http:
- foobar
- foobar
Middleware12:
ipAllowList:
rejectStatusCode: 404
sourceRange:
- foobar
- foobar
ipStrategy:
depth: 42
excludedIPs:
- foobar
- foobar
Middleware13:
inFlightReq:
amount: 42
sourceCriterion:
@ -244,7 +255,7 @@ http:
- foobar
requestHeaderName: foobar
requestHost: true
Middleware13:
Middleware14:
passTLSClientCert:
pem: true
info:
@ -269,11 +280,11 @@ http:
serialNumber: true
domainComponent: true
serialNumber: true
Middleware14:
Middleware15:
plugin:
PluginConf:
foo: bar
Middleware15:
Middleware16:
rateLimit:
average: 42
period: 42s
@ -286,38 +297,38 @@ http:
- foobar
requestHeaderName: foobar
requestHost: true
Middleware16:
Middleware17:
redirectRegex:
regex: foobar
replacement: foobar
permanent: true
Middleware17:
Middleware18:
redirectScheme:
scheme: foobar
port: foobar
permanent: true
Middleware18:
Middleware19:
replacePath:
path: foobar
Middleware19:
Middleware20:
replacePathRegex:
regex: foobar
replacement: foobar
Middleware20:
Middleware21:
retry:
attempts: 42
initialInterval: 42s
Middleware21:
Middleware22:
stripPrefix:
prefixes:
- foobar
- foobar
Middleware22:
Middleware23:
stripPrefixRegex:
regex:
- foobar
- foobar
Middleware23:
Middleware24:
grpcWeb:
allowOrigins:
- foobar
@ -450,6 +461,11 @@ tcp:
TCPMiddleware01:
inFlightConn:
amount: 42
TCPMiddleware02:
ipAllowList:
sourceRange:
- foobar
- foobar
serversTransports:
TCPServersTransport0:
dialTimeout: 42s
@ -0,0 +1,281 @@
# Copyright 2023 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Gateway API Experimental channel install
#
#
# config/crd/experimental/gateway.networking.k8s.io_backendtlspolicies.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
labels:
gateway.networking.k8s.io/policy: Direct
name: backendtlspolicies.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: BackendTLSPolicy
listKind: BackendTLSPolicyList
plural: backendtlspolicies
shortNames:
- btlspolicy
singular: backendtlspolicy
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: BackendTLSPolicy provides a way to configure how a Gateway connects to a Backend via TLS.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of BackendTLSPolicy.
properties:
targetRef:
description: "TargetRef identifies an API object to apply the policy to. Only Services have Extended support. Implementations MAY support additional objects, with Implementation Specific support. Note that this config applies to the entire referenced resource by default, but this default may change in the future to provide a more granular application of the policy. \n Support: Extended for Kubernetes Service \n Support: Implementation-specific for any other resource"
properties:
group:
description: Group is the group of the target resource.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the target resource.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the target resource.
maxLength: 253
minLength: 1
type: string
namespace:
description: Namespace is the namespace of the referent. When unspecified, the local namespace is inferred. Even when policy targets a resource in a different namespace, it MUST only apply to traffic originating from the same namespace as the policy.
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
sectionName:
description: "SectionName is the name of a section within the target resource. When unspecified, this targetRef targets the entire resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name * Service: Port Name \n If a SectionName is specified, but does not exist on the targeted object, the Policy must fail to attach, and the policy implementation should record a `ResolvedRefs` or similar Condition in the Policy's status."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- group
- kind
- name
type: object
tls:
description: TLS contains backend TLS policy configuration.
properties:
caCertRefs:
description: "CACertRefs contains one or more references to Kubernetes objects that contain a PEM-encoded TLS CA certificate bundle, which is used to validate a TLS handshake between the Gateway and backend Pod. \n If CACertRefs is empty or unspecified, then WellKnownCACerts must be specified. Only one of CACertRefs or WellKnownCACerts may be specified, not both. If CACertRefs is empty or unspecified, the configuration for WellKnownCACerts MUST be honored instead. \n References to a resource in a different namespace are invalid for the moment, although we will revisit this in the future. \n A single CACertRef to a Kubernetes ConfigMap kind has \"Core\" support. Implementations MAY choose to support attaching multiple certificates to a backend, but this behavior is implementation-specific. \n Support: Core - An optional single reference to a Kubernetes ConfigMap, with the CA certificate in a key named `ca.crt`. \n Support: Implementation-specific (More than one reference, or other kinds of resources)."
items:
description: "LocalObjectReference identifies an API object within the namespace of the referrer. The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid. \n References to objects with invalid Group and Kind are not valid, and must be rejected by the implementation, with appropriate Conditions set on the containing object."
properties:
group:
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent. For example "HTTPRoute" or "Service".
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
- name
type: object
maxItems: 8
type: array
hostname:
description: "Hostname is used for two purposes in the connection between Gateways and backends: \n 1. Hostname MUST be used as the SNI to connect to the backend (RFC 6066). 2. Hostname MUST be used for authentication and MUST match the certificate served by the matching backend. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
wellKnownCACerts:
description: "WellKnownCACerts specifies whether system CA certificates may be used in the TLS handshake between the gateway and backend pod. \n If WellKnownCACerts is unspecified or empty (\"\"), then CACertRefs must be specified with at least one entry for a valid configuration. Only one of CACertRefs or WellKnownCACerts may be specified, not both. \n Support: Core for \"System\""
enum:
- System
type: string
required:
- hostname
type: object
x-kubernetes-validations:
- message: must not contain both CACertRefs and WellKnownCACerts
rule: '!(has(self.caCertRefs) && size(self.caCertRefs) > 0 && has(self.wellKnownCACerts) && self.wellKnownCACerts != "")'
- message: must specify either CACertRefs or WellKnownCACerts
rule: (has(self.caCertRefs) && size(self.caCertRefs) > 0 || has(self.wellKnownCACerts) && self.wellKnownCACerts != "")
required:
- targetRef
- tls
type: object
status:
description: Status defines the current state of BackendTLSPolicy.
properties:
ancestors:
description: "Ancestors is a list of ancestor resources (usually Gateways) that are associated with the policy, and the status of the policy with respect to each ancestor. When this policy attaches to a parent, the controller that manages the parent and the ancestors MUST add an entry to this list when the controller first sees the policy and SHOULD update the entry as appropriate when the relevant ancestor is modified. \n Note that choosing the relevant ancestor is left to the Policy designers; an important part of Policy design is designing the right object level at which to namespace this status. \n Note also that implementations MUST ONLY populate ancestor status for the Ancestor resources they are responsible for. Implementations MUST use the ControllerName field to uniquely identify the entries in this list that they are responsible for. \n Note that to achieve this, the list of PolicyAncestorStatus structs MUST be treated as a map with a composite key, made up of the AncestorRef and ControllerName fields combined. \n A maximum of 16 ancestors will be represented in this list. An empty list means the Policy is not relevant for any ancestors. \n If this slice is full, implementations MUST NOT add further entries. Instead they MUST consider the policy unimplementable and signal that on any related resources such as the ancestor that would be referenced here. For example, if this list was full on BackendTLSPolicy, no additional Gateways would be able to reference the Service targeted by the BackendTLSPolicy."
items:
description: "PolicyAncestorStatus describes the status of a route with respect to an associated Ancestor. \n Ancestors refer to objects that are either the Target of a policy or above it in terms of object hierarchy. For example, if a policy targets a Service, the Policy's Ancestors are, in order, the Service, the HTTPRoute, the Gateway, and the GatewayClass. Almost always, in this hierarchy, the Gateway will be the most useful object to place Policy status on, so we recommend that implementations SHOULD use Gateway as the PolicyAncestorStatus object unless the designers have a _very_ good reason otherwise. \n In the context of policy attachment, the Ancestor is used to distinguish which resource results in a distinct application of this policy. For example, if a policy targets a Service, it may have a distinct result per attached Gateway. \n Policies targeting the same resource may have different effects depending on the ancestors of those resources. For example, different Gateways targeting the same Service may have different capabilities, especially if they have different underlying implementations. \n For example, in BackendTLSPolicy, the Policy attaches to a Service that is used as a backend in a HTTPRoute that is itself attached to a Gateway. In this case, the relevant object for status is the Gateway, and that is the ancestor object referred to in this status. \n Note that a parent is also an ancestor, so for objects where the parent is the relevant object for status, this struct SHOULD still be used. \n This struct is intended to be used in a slice that's effectively a map, with a composite key made up of the AncestorRef and the ControllerName."
properties:
ancestorRef:
description: AncestorRef corresponds with a ParentRef in the spec that this PolicyAncestorStatus struct describes the status of.
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- name
type: object
conditions:
description: Conditions describes the status of the Policy with respect to the given Ancestor.
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
minItems: 1
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
controllerName:
description: "ControllerName is a domain/path string that indicates the name of the controller that wrote this status. This corresponds with the controllerName field on GatewayClass. \n Example: \"example.net/gateway-controller\". \n The format of this field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). \n Controllers MUST populate this field when writing status. Controllers should ensure that entries to status populated with their ControllerName are cleaned up when they are no longer necessary."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
required:
- ancestorRef
- controllerName
type: object
maxItems: 16
type: array
required:
- ancestors
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null
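For reference, a minimal BackendTLSPolicy manifest that satisfies the schema above could look like the following. This is an illustrative sketch only: the resource names, the namespace, and the choice of the v1alpha2 apiVersion are assumptions rather than values taken from this repository.

apiVersion: gateway.networking.k8s.io/v1alpha2
kind: BackendTLSPolicy
metadata:
  name: example-backend-tls          # hypothetical policy name
  namespace: default                 # hypothetical namespace
spec:
  targetRef:
    group: ""                        # core API group, for a Service referent
    kind: Service
    name: example-backend            # hypothetical Service in the same namespace
    sectionName: https               # optional: limits the policy to one named port
  tls:
    # The CEL rules above allow exactly one of caCertRefs or wellKnownCACerts.
    wellKnownCACerts: System
    hostname: example-backend.default.svc.cluster.local

Per the x-kubernetes-validations block above, a manifest that sets both caCertRefs and wellKnownCACerts, or neither, is rejected by the API server on write.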

View file

@@ -1,10 +1,13 @@
---
#
# config/crd/experimental/gateway.networking.k8s.io_gatewayclasses.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/891
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: gatewayclasses.gateway.networking.k8s.io
spec:
@@ -21,9 +24,12 @@ spec:
scope: Cluster
versions:
- additionalPrinterColumns:
- jsonPath: .spec.controller
- jsonPath: .spec.controllerName
name: Controller
type: string
- jsonPath: .status.conditions[?(@.type=="Accepted")].status
name: Accepted
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
@@ -31,32 +37,16 @@ spec:
name: Description
priority: 1
type: string
name: v1alpha2
name: v1
schema:
openAPIV3Schema:
description: "GatewayClass describes a class of Gateways available to the
user for creating Gateway resources. \n It is recommended that this resource
be used as a template for Gateways. This means that a Gateway is based on
the state of the GatewayClass at the time it was created and changes to
the GatewayClass or associated parameters are not propagated down to existing
Gateways. This recommendation is intended to limit the blast radius of changes
to GatewayClass or associated parameters. If implementations choose to propagate
GatewayClass changes to existing Gateways, that MUST be clearly documented
by the implementation. \n Whenever one or more Gateways are using a GatewayClass,
implementations MUST add the `gateway-exists-finalizer.gateway.networking.k8s.io`
finalizer on the associated GatewayClass. This ensures that a GatewayClass
associated with a Gateway is not deleted while in use. \n GatewayClass is
a Cluster level resource."
description: "GatewayClass describes a class of Gateways available to the user for creating Gateway resources. \n It is recommended that this resource be used as a template for Gateways. This means that a Gateway is based on the state of the GatewayClass at the time it was created and changes to the GatewayClass or associated parameters are not propagated down to existing Gateways. This recommendation is intended to limit the blast radius of changes to GatewayClass or associated parameters. If implementations choose to propagate GatewayClass changes to existing Gateways, that MUST be clearly documented by the implementation. \n Whenever one or more Gateways are using a GatewayClass, implementations SHOULD add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the associated GatewayClass. This ensures that a GatewayClass associated with a Gateway is not deleted while in use. \n GatewayClass is a Cluster level resource."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -64,27 +54,20 @@ spec:
description: Spec defines the desired state of GatewayClass.
properties:
controllerName:
description: "ControllerName is the name of the controller that is
managing Gateways of this class. The value of this field MUST be
a domain prefixed path. \n Example: \"example.net/gateway-controller\".
\n This field is not mutable and cannot be empty. \n Support: Core"
description: "ControllerName is the name of the controller that is managing Gateways of this class. The value of this field MUST be a domain prefixed path. \n Example: \"example.net/gateway-controller\". \n This field is not mutable and cannot be empty. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
x-kubernetes-validations:
- message: Value is immutable
rule: self == oldSelf
description:
description: Description helps describe a GatewayClass with more details.
maxLength: 64
type: string
parametersRef:
description: "ParametersRef is a reference to a resource that contains
the configuration parameters corresponding to the GatewayClass.
This is optional if the controller does not require any additional
configuration. \n ParametersRef can reference a standard Kubernetes
resource, i.e. ConfigMap, or an implementation-specific custom resource.
The resource can be cluster-scoped or namespace-scoped. \n If the
referent cannot be found, the GatewayClass's \"InvalidParameters\"
status condition will be true. \n Support: Custom"
description: "ParametersRef is a reference to a resource that contains the configuration parameters corresponding to the GatewayClass. This is optional if the controller does not require any additional configuration. \n ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap, or an implementation-specific custom resource. The resource can be cluster-scoped or namespace-scoped. \n If the referent cannot be found, the GatewayClass's \"InvalidParameters\" status condition will be true. \n Support: Implementation-specific"
properties:
group:
description: Group is the group of the referent.
@@ -103,9 +86,7 @@ spec:
minLength: 1
type: string
namespace:
description: Namespace is the namespace of the referent. This
field is required when referring to a Namespace-scoped resource
and MUST be unset when referring to a Cluster-scoped resource.
description: Namespace is the namespace of the referent. This field is required when referring to a Namespace-scoped resource and MUST be unset when referring to a Cluster-scoped resource.
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
@@ -126,58 +107,34 @@ spec:
reason: Waiting
status: Unknown
type: Accepted
description: Status defines the current state of GatewayClass.
description: "Status defines the current state of GatewayClass. \n Implementations MUST populate status on all GatewayClass resources which specify their controller name."
properties:
conditions:
default:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Waiting
reason: Pending
status: Unknown
type: Accepted
description: "Conditions is the current status from the controller
for this GatewayClass. \n Controllers should prefer to publish conditions
using values of GatewayClassConditionType for the type of each Condition."
description: "Conditions is the current status from the controller for this GatewayClass. \n Controllers should prefer to publish conditions using values of GatewayClassConditionType for the type of each Condition."
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
@@ -190,11 +147,7 @@ spec:
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -210,6 +163,208 @@ spec:
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
supportedFeatures:
description: 'SupportedFeatures is the set of features the GatewayClass supports. It MUST be sorted in ascending alphabetical order.'
items:
description: SupportedFeature is used to describe distinct features that are covered by conformance tests.
enum:
- Gateway
- GatewayPort8080
- GatewayStaticAddresses
- HTTPRoute
- HTTPRouteDestinationPortMatching
- HTTPRouteHostRewrite
- HTTPRouteMethodMatching
- HTTPRoutePathRedirect
- HTTPRoutePathRewrite
- HTTPRoutePortRedirect
- HTTPRouteQueryParamMatching
- HTTPRouteRequestMirror
- HTTPRouteRequestMultipleMirrors
- HTTPRouteResponseHeaderModification
- HTTPRouteSchemeRedirect
- Mesh
- ReferenceGrant
- TLSRoute
type: string
maxItems: 64
type: array
x-kubernetes-list-type: set
type: object
required:
- spec
type: object
served: true
storage: false
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.controllerName
name: Controller
type: string
- jsonPath: .status.conditions[?(@.type=="Accepted")].status
name: Accepted
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .spec.description
name: Description
priority: 1
type: string
name: v1beta1
schema:
openAPIV3Schema:
description: "GatewayClass describes a class of Gateways available to the user for creating Gateway resources. \n It is recommended that this resource be used as a template for Gateways. This means that a Gateway is based on the state of the GatewayClass at the time it was created and changes to the GatewayClass or associated parameters are not propagated down to existing Gateways. This recommendation is intended to limit the blast radius of changes to GatewayClass or associated parameters. If implementations choose to propagate GatewayClass changes to existing Gateways, that MUST be clearly documented by the implementation. \n Whenever one or more Gateways are using a GatewayClass, implementations SHOULD add the `gateway-exists-finalizer.gateway.networking.k8s.io` finalizer on the associated GatewayClass. This ensures that a GatewayClass associated with a Gateway is not deleted while in use. \n GatewayClass is a Cluster level resource."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of GatewayClass.
properties:
controllerName:
description: "ControllerName is the name of the controller that is managing Gateways of this class. The value of this field MUST be a domain prefixed path. \n Example: \"example.net/gateway-controller\". \n This field is not mutable and cannot be empty. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
x-kubernetes-validations:
- message: Value is immutable
rule: self == oldSelf
description:
description: Description helps describe a GatewayClass with more details.
maxLength: 64
type: string
parametersRef:
description: "ParametersRef is a reference to a resource that contains the configuration parameters corresponding to the GatewayClass. This is optional if the controller does not require any additional configuration. \n ParametersRef can reference a standard Kubernetes resource, i.e. ConfigMap, or an implementation-specific custom resource. The resource can be cluster-scoped or namespace-scoped. \n If the referent cannot be found, the GatewayClass's \"InvalidParameters\" status condition will be true. \n Support: Implementation-specific"
properties:
group:
description: Group is the group of the referent.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent.
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: Namespace is the namespace of the referent. This field is required when referring to a Namespace-scoped resource and MUST be unset when referring to a Cluster-scoped resource.
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- name
type: object
required:
- controllerName
type: object
status:
default:
conditions:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Waiting
status: Unknown
type: Accepted
description: "Status defines the current state of GatewayClass. \n Implementations MUST populate status on all GatewayClass resources which specify their controller name."
properties:
conditions:
default:
- lastTransitionTime: "1970-01-01T00:00:00Z"
message: Waiting for controller
reason: Pending
status: Unknown
type: Accepted
description: "Conditions is the current status from the controller for this GatewayClass. \n Controllers should prefer to publish conditions using values of GatewayClassConditionType for the type of each Condition."
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
supportedFeatures:
description: 'SupportedFeatures is the set of features the GatewayClass supports. It MUST be sorted in ascending alphabetical order.'
items:
description: SupportedFeature is used to describe distinct features that are covered by conformance tests.
enum:
- Gateway
- GatewayPort8080
- GatewayStaticAddresses
- HTTPRoute
- HTTPRouteDestinationPortMatching
- HTTPRouteHostRewrite
- HTTPRouteMethodMatching
- HTTPRoutePathRedirect
- HTTPRoutePathRewrite
- HTTPRoutePortRedirect
- HTTPRouteQueryParamMatching
- HTTPRouteRequestMirror
- HTTPRouteRequestMultipleMirrors
- HTTPRouteResponseHeaderModification
- HTTPRouteSchemeRedirect
- Mesh
- ReferenceGrant
- TLSRoute
type: string
maxItems: 64
type: array
x-kubernetes-list-type: set
type: object
required:
- spec
@@ -222,5 +377,5 @@ status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
conditions: null
storedVersions: null
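As a usage sketch for the GatewayClass schema above, a hypothetical manifest could look like the following; the metadata name, the ConfigMap referent, and the use of the v1 apiVersion are illustrative assumptions, not values from this repository.

apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
  name: example-gateway-class        # hypothetical name
spec:
  # controllerName is required and must be a domain-prefixed path.
  controllerName: example.net/gateway-controller
  description: Example class for illustration purposes
  # parametersRef is optional; a core ConfigMap is shown as one possible referent.
  parametersRef:
    group: ""
    kind: ConfigMap
    name: example-gateway-config     # hypothetical ConfigMap
    namespace: default               # required for a namespace-scoped referent

Because controllerName carries the `self == oldSelf` validation shown above, the field is immutable once the resource exists; switching controllers requires creating a new GatewayClass.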

File diff suppressed because one or more lines are too long

View file

@@ -0,0 +1,819 @@
#
# config/crd/experimental/gateway.networking.k8s.io_grpcroutes.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: grpcroutes.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: GRPCRoute
listKind: GRPCRouteList
plural: grpcroutes
singular: grpcroute
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.hostnames
name: Hostnames
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: "GRPCRoute provides a way to route gRPC requests. This includes the capability to match requests by hostname, gRPC service, gRPC method, or HTTP/2 header. Filters can be used to specify additional processing steps. Backends specify where matching requests will be routed. \n GRPCRoute falls under extended support within the Gateway API. Within the following specification, the word \"MUST\" indicates that an implementation supporting GRPCRoute must conform to the indicated requirement, but an implementation not supporting this route type need not follow the requirement unless explicitly indicated. \n Implementations supporting `GRPCRoute` with the `HTTPS` `ProtocolType` MUST accept HTTP/2 connections without an initial upgrade from HTTP/1.1, i.e. via ALPN. If the implementation does not support this, then it MUST set the \"Accepted\" condition to \"False\" for the affected listener with a reason of \"UnsupportedProtocol\". Implementations MAY also accept HTTP/2 connections with an upgrade from HTTP/1. \n Implementations supporting `GRPCRoute` with the `HTTP` `ProtocolType` MUST support HTTP/2 over cleartext TCP (h2c, https://www.rfc-editor.org/rfc/rfc7540#section-3.1) without an initial upgrade from HTTP/1.1, i.e. with prior knowledge (https://www.rfc-editor.org/rfc/rfc7540#section-3.4). If the implementation does not support this, then it MUST set the \"Accepted\" condition to \"False\" for the affected listener with a reason of \"UnsupportedProtocol\". Implementations MAY also accept HTTP/2 connections with an upgrade from HTTP/1, i.e. without prior knowledge."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of GRPCRoute.
properties:
hostnames:
description: "Hostnames defines a set of hostnames to match against the GRPC Host header to select a GRPCRoute to process the request. This matches the RFC 1123 definition of a hostname with 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard label MUST appear by itself as the first label. \n If a hostname is specified by both the Listener and GRPCRoute, there MUST be at least one intersecting hostname for the GRPCRoute to be attached to the Listener. For example: \n * A Listener with `test.example.com` as the hostname matches GRPCRoutes that have either not specified any hostnames, or have specified at least one of `test.example.com` or `*.example.com`. * A Listener with `*.example.com` as the hostname matches GRPCRoutes that have either not specified any hostnames or have specified at least one hostname that matches the Listener hostname. For example, `test.example.com` and `*.example.com` would both match. On the other hand, `example.com` and `test.example.net` would not match. \n Hostnames that are prefixed with a wildcard label (`*.`) are interpreted as a suffix match. That means that a match for `*.example.com` would match both `test.example.com`, and `foo.test.example.com`, but not `example.com`. \n If both the Listener and GRPCRoute have specified hostnames, any GRPCRoute hostnames that do not match the Listener hostname MUST be ignored. For example, if a Listener specified `*.example.com`, and the GRPCRoute specified `test.example.com` and `test.example.net`, `test.example.net` MUST NOT be considered for a match. \n If both the Listener and GRPCRoute have specified hostnames, and none match with the criteria above, then the GRPCRoute MUST NOT be accepted by the implementation. The implementation MUST raise an 'Accepted' Condition with a status of `False` in the corresponding RouteParentStatus. \n If a Route (A) of type HTTPRoute or GRPCRoute is attached to a Listener and that listener already has another Route (B) of the other type attached and the intersection of the hostnames of A and B is non-empty, then the implementation MUST accept exactly one of these two routes, determined by the following criteria, in order: \n * The oldest Route based on creation timestamp. * The Route appearing first in alphabetical order by \"{namespace}/{name}\". \n The rejected Route MUST raise an 'Accepted' condition with a status of 'False' in the corresponding RouteParentStatus. \n Support: Core"
items:
description: "Hostname is the fully qualified domain name of a network host. This matches the RFC 1123 definition of a hostname with 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard label must appear by itself as the first label. \n Hostname can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. `*.example.com`). \n Note that as per RFC1035 and RFC1123, a *label* must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character. No other punctuation is allowed."
maxLength: 253
minLength: 1
pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
maxItems: 16
type: array
parentRefs:
description: "ParentRefs references the resources (usually Gateways) that a Route wants to be attached to. Note that the referenced parent resource needs to allow this for the attachment to be complete. For Gateways, that means the Gateway needs to allow attachment from Routes of this kind and namespace. For Services, that means the Service must either be in the same namespace for a \"producer\" route, or the mesh implementation must support and allow \"consumer\" routes for the referenced Service. ReferenceGrant is not applicable for governing ParentRefs to Services - it is not possible to create a \"producer\" route for a Service in a different namespace from the Route. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) This API may be extended in the future to support additional kinds of parent resources. \n ParentRefs must be _distinct_. This means either that: \n * They select different objects. If this is the case, then parentRef entries are distinct. In terms of fields, this means that the multi-part key defined by `group`, `kind`, `namespace`, and `name` must be unique across all parentRef entries in the Route. * They do not select different objects, but for each optional field used, each ParentRef that selects the same object must set the same set of optional fields to different values. If one ParentRef sets a combination of optional fields, all must set the same combination. \n Some examples: \n * If one ParentRef sets `sectionName`, all ParentRefs referencing the same object must also set `sectionName`. * If one ParentRef sets `port`, all ParentRefs referencing the same object must also set `port`. * If one ParentRef sets `sectionName` and `port`, all ParentRefs referencing the same object must also set `sectionName` and `port`. \n It is possible to separately reference multiple distinct objects that may be collapsed by an implementation. For example, some implementations may choose to merge compatible Gateway Listeners together. If that is the case, the list of routes attached to those resources should also be merged. \n Note that for ParentRefs that cross namespace boundaries, there are specific rules. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example, Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable other kinds of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n "
items:
description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- name
type: object
maxItems: 32
type: array
x-kubernetes-validations:
- message: sectionName or port must be specified when parentRefs includes 2 or more references to the same parent
rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) || p2.port == 0)): true))'
- message: sectionName or port must be unique when parentRefs includes 2 or more references to the same parent
rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port == p2.port))))
rules:
description: Rules are a list of GRPC matchers, filters and actions.
items:
description: GRPCRouteRule defines the semantics for matching a gRPC request based on conditions (matches), processing it (filters), and forwarding the request to an API object (backendRefs).
properties:
backendRefs:
description: "BackendRefs defines the backend(s) where matching requests should be sent. \n Failure behavior here depends on how many BackendRefs are specified and how many are invalid. \n If *all* entries in BackendRefs are invalid, and there are also no filters specified in this route rule, *all* traffic which matches this rule MUST receive an `UNAVAILABLE` status. \n See the GRPCBackendRef definition for the rules about what makes a single GRPCBackendRef invalid. \n When a GRPCBackendRef is invalid, `UNAVAILABLE` statuses MUST be returned for requests that would have otherwise been routed to an invalid backend. If multiple backends are specified, and some are invalid, the proportion of requests that would otherwise have been routed to an invalid backend MUST receive an `UNAVAILABLE` status. \n For example, if two backends are specified with equal weights, and one is invalid, 50 percent of traffic MUST receive an `UNAVAILABLE` status. Implementations may choose how that 50 percent is determined. \n Support: Core for Kubernetes Service \n Support: Implementation-specific for any other resource \n Support for weight: Core"
items:
description: "GRPCBackendRef defines how a GRPCRoute forwards a gRPC request. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n <gateway:experimental:description> \n When the BackendRef points to a Kubernetes Service, implementations SHOULD honor the appProtocol field if it is set for the target Service Port. \n Implementations supporting appProtocol SHOULD recognize the Kubernetes Standard Application Protocols defined in KEP-3726. \n If a Service appProtocol isn't specified, an implementation MAY infer the backend protocol through its own means. Implementations MAY infer the protocol from the Route type referring to the backend Service. \n If a Route is not able to send traffic to the backend using the specified protocol then the backend is considered invalid. Implementations MUST set the \"ResolvedRefs\" condition to \"False\" with the \"UnsupportedProtocol\" reason. \n </gateway:experimental:description>"
properties:
filters:
description: "Filters defined at this level MUST be executed if and only if the request is being forwarded to the backend defined here. \n Support: Implementation-specific (For broader support of filters, use the Filters field in GRPCRouteRule.)"
items:
description: GRPCRouteFilter defines processing steps that must be completed during the request or response lifecycle. GRPCRouteFilters are meant as an extension point to express processing that may be done in Gateway implementations. Some examples include request or response modification, implementing authentication strategies, rate-limiting, and traffic shaping. API guarantee/conformance is defined based on the type of the filter.
properties:
extensionRef:
description: "ExtensionRef is an optional, implementation-specific extension to the \"filter\" behavior. For example, resource \"myroutefilter\" in group \"networking.example.net\"). ExtensionRef MUST NOT be used for core and extended filters. \n Support: Implementation-specific \n This filter can be used multiple times within the same rule."
properties:
group:
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent. For example "HTTPRoute" or "Service".
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
- name
type: object
requestHeaderModifier:
description: "RequestHeaderModifier defines a schema for a filter that modifies request headers. \n Support: Core"
properties:
add:
description: "Add adds the given header(s) (name, value) to the request before the action. It appends to any existing values associated with the header name. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: add: - name: \"my-header\" value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
remove:
description: "Remove the given header(s) from the HTTP request before the action. The value of Remove is a list of HTTP header names. Note that the header names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: bar my-header3: baz \n Config: remove: [\"my-header1\", \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: bar"
items:
type: string
maxItems: 16
type: array
x-kubernetes-list-type: set
set:
description: "Set overwrites the request with the given header (name, value) before the action. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: set: - name: \"my-header\" value: \"bar\" \n Output: GET /foo HTTP/1.1 my-header: bar"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
requestMirror:
description: "RequestMirror defines a schema for a filter that mirrors requests. Requests are sent to the specified destination, but responses from that destination are ignored. \n This filter can be used multiple times within the same rule. Note that not all implementations will be able to support mirroring to multiple backends. \n Support: Extended"
properties:
backendRef:
description: "BackendRef references a resource where mirrored requests are sent. \n Mirrored requests must be sent only to a single destination endpoint within this BackendRef, irrespective of how many endpoints are present within this BackendRef. \n If the referent cannot be found, this BackendRef is invalid and must be dropped from the Gateway. The controller must ensure the \"ResolvedRefs\" condition on the Route status is set to `status: False` and not configure this backend in the underlying implementation. \n If there is a cross-namespace reference to an *existing* object that is not allowed by a ReferenceGrant, the controller must ensure the \"ResolvedRefs\" condition on the Route is set to `status: False`, with the \"RefNotPermitted\" reason and not configure this backend in the underlying implementation. \n In either error case, the Message of the `ResolvedRefs` Condition should be used to provide more detail about the problem. \n Support: Extended for Kubernetes Service \n Support: Implementation-specific for any other resource"
properties:
group:
default: ""
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
required:
- backendRef
type: object
responseHeaderModifier:
description: "ResponseHeaderModifier defines a schema for a filter that modifies response headers. \n Support: Extended"
properties:
add:
description: "Add adds the given header(s) (name, value) to the request before the action. It appends to any existing values associated with the header name. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: add: - name: \"my-header\" value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
remove:
description: "Remove the given header(s) from the HTTP request before the action. The value of Remove is a list of HTTP header names. Note that the header names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: bar my-header3: baz \n Config: remove: [\"my-header1\", \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: bar"
items:
type: string
maxItems: 16
type: array
x-kubernetes-list-type: set
set:
description: "Set overwrites the request with the given header (name, value) before the action. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: set: - name: \"my-header\" value: \"bar\" \n Output: GET /foo HTTP/1.1 my-header: bar"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
type:
description: "Type identifies the type of filter to apply. As with other API fields, types are classified into three conformance levels: \n - Core: Filter types and their corresponding configuration defined by \"Support: Core\" in this package, e.g. \"RequestHeaderModifier\". All implementations supporting GRPCRoute MUST support core filters. \n - Extended: Filter types and their corresponding configuration defined by \"Support: Extended\" in this package, e.g. \"RequestMirror\". Implementers are encouraged to support extended filters. \n - Implementation-specific: Filters that are defined and supported by specific vendors. In the future, filters showing convergence in behavior across multiple implementations will be considered for inclusion in extended or core conformance levels. Filter-specific configuration for such filters is specified using the ExtensionRef field. `Type` MUST be set to \"ExtensionRef\" for custom filters. \n Implementers are encouraged to define custom implementation types to extend the core API with implementation-specific behavior. \n If a reference to a custom filter type cannot be resolved, the filter MUST NOT be skipped. Instead, requests that would have been processed by that filter MUST receive a HTTP error response. \n "
enum:
- ResponseHeaderModifier
- RequestHeaderModifier
- RequestMirror
- ExtensionRef
type: string
required:
- type
type: object
x-kubernetes-validations:
- message: filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier
rule: '!(has(self.requestHeaderModifier) && self.type != ''RequestHeaderModifier'')'
- message: filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type
rule: '!(!has(self.requestHeaderModifier) && self.type == ''RequestHeaderModifier'')'
- message: filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier
rule: '!(has(self.responseHeaderModifier) && self.type != ''ResponseHeaderModifier'')'
- message: filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type
rule: '!(!has(self.responseHeaderModifier) && self.type == ''ResponseHeaderModifier'')'
- message: filter.requestMirror must be nil if the filter.type is not RequestMirror
rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')'
- message: filter.requestMirror must be specified for RequestMirror filter.type
rule: '!(!has(self.requestMirror) && self.type == ''RequestMirror'')'
- message: filter.extensionRef must be nil if the filter.type is not ExtensionRef
rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')'
- message: filter.extensionRef must be specified for ExtensionRef filter.type
rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')'
maxItems: 16
type: array
x-kubernetes-validations:
- message: RequestHeaderModifier filter cannot be repeated
rule: self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1
- message: ResponseHeaderModifier filter cannot be repeated
rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1
group:
default: ""
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
weight:
default: 1
description: "Weight specifies the proportion of requests forwarded to the referenced backend. This is computed as weight/(sum of all weights in this BackendRefs list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. Weight is not a percentage and the sum of weights does not need to equal 100. \n If only one backend is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. If weight is set to 0, no traffic should be forwarded for this entry. If unspecified, weight defaults to 1. \n Support for this field varies based on the context where used."
format: int32
maximum: 1000000
minimum: 0
type: integer
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
maxItems: 16
type: array
filters:
description: "Filters define the filters that are applied to requests that match this rule. \n The effects of ordering of multiple behaviors are currently unspecified. This can change in the future based on feedback during the alpha stage. \n Conformance-levels at this level are defined based on the type of filter: \n - ALL core filters MUST be supported by all implementations that support GRPCRoute. - Implementers are encouraged to support extended filters. - Implementation-specific custom filters have no API guarantees across implementations. \n Specifying the same filter multiple times is not supported unless explicitly indicated in the filter. \n If an implementation can not support a combination of filters, it must clearly document that limitation. In cases where incompatible or unsupported filters are specified and cause the `Accepted` condition to be set to status `False`, implementations may use the `IncompatibleFilters` reason to specify this configuration error. \n Support: Core"
items:
description: GRPCRouteFilter defines processing steps that must be completed during the request or response lifecycle. GRPCRouteFilters are meant as an extension point to express processing that may be done in Gateway implementations. Some examples include request or response modification, implementing authentication strategies, rate-limiting, and traffic shaping. API guarantee/conformance is defined based on the type of the filter.
properties:
extensionRef:
description: "ExtensionRef is an optional, implementation-specific extension to the \"filter\" behavior. For example, resource \"myroutefilter\" in group \"networking.example.net\"). ExtensionRef MUST NOT be used for core and extended filters. \n Support: Implementation-specific \n This filter can be used multiple times within the same rule."
properties:
group:
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: Kind is kind of the referent. For example "HTTPRoute" or "Service".
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
- name
type: object
requestHeaderModifier:
description: "RequestHeaderModifier defines a schema for a filter that modifies request headers. \n Support: Core"
properties:
add:
description: "Add adds the given header(s) (name, value) to the request before the action. It appends to any existing values associated with the header name. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: add: - name: \"my-header\" value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
remove:
description: "Remove the given header(s) from the HTTP request before the action. The value of Remove is a list of HTTP header names. Note that the header names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: bar my-header3: baz \n Config: remove: [\"my-header1\", \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: bar"
items:
type: string
maxItems: 16
type: array
x-kubernetes-list-type: set
set:
description: "Set overwrites the request with the given header (name, value) before the action. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: set: - name: \"my-header\" value: \"bar\" \n Output: GET /foo HTTP/1.1 my-header: bar"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
requestMirror:
description: "RequestMirror defines a schema for a filter that mirrors requests. Requests are sent to the specified destination, but responses from that destination are ignored. \n This filter can be used multiple times within the same rule. Note that not all implementations will be able to support mirroring to multiple backends. \n Support: Extended"
properties:
backendRef:
description: "BackendRef references a resource where mirrored requests are sent. \n Mirrored requests must be sent only to a single destination endpoint within this BackendRef, irrespective of how many endpoints are present within this BackendRef. \n If the referent cannot be found, this BackendRef is invalid and must be dropped from the Gateway. The controller must ensure the \"ResolvedRefs\" condition on the Route status is set to `status: False` and not configure this backend in the underlying implementation. \n If there is a cross-namespace reference to an *existing* object that is not allowed by a ReferenceGrant, the controller must ensure the \"ResolvedRefs\" condition on the Route is set to `status: False`, with the \"RefNotPermitted\" reason and not configure this backend in the underlying implementation. \n In either error case, the Message of the `ResolvedRefs` Condition should be used to provide more detail about the problem. \n Support: Extended for Kubernetes Service \n Support: Implementation-specific for any other resource"
properties:
group:
default: ""
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
required:
- backendRef
type: object
responseHeaderModifier:
description: "ResponseHeaderModifier defines a schema for a filter that modifies response headers. \n Support: Extended"
properties:
add:
description: "Add adds the given header(s) (name, value) to the request before the action. It appends to any existing values associated with the header name. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: add: - name: \"my-header\" value: \"bar,baz\" \n Output: GET /foo HTTP/1.1 my-header: foo,bar,baz"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
remove:
description: "Remove the given header(s) from the HTTP request before the action. The value of Remove is a list of HTTP header names. Note that the header names are case-insensitive (see https://datatracker.ietf.org/doc/html/rfc2616#section-4.2). \n Input: GET /foo HTTP/1.1 my-header1: foo my-header2: bar my-header3: baz \n Config: remove: [\"my-header1\", \"my-header3\"] \n Output: GET /foo HTTP/1.1 my-header2: bar"
items:
type: string
maxItems: 16
type: array
x-kubernetes-list-type: set
set:
description: "Set overwrites the request with the given header (name, value) before the action. \n Input: GET /foo HTTP/1.1 my-header: foo \n Config: set: - name: \"my-header\" value: \"bar\" \n Output: GET /foo HTTP/1.1 my-header: bar"
items:
description: HTTPHeader represents an HTTP Header name and value as defined by RFC 7230.
properties:
name:
description: "Name is the name of the HTTP Header to be matched. Name matching MUST be case insensitive. (See https://tools.ietf.org/html/rfc7230#section-3.2). \n If multiple entries specify equivalent header names, the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
value:
description: Value is the value of HTTP Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
type: object
type:
description: "Type identifies the type of filter to apply. As with other API fields, types are classified into three conformance levels: \n - Core: Filter types and their corresponding configuration defined by \"Support: Core\" in this package, e.g. \"RequestHeaderModifier\". All implementations supporting GRPCRoute MUST support core filters. \n - Extended: Filter types and their corresponding configuration defined by \"Support: Extended\" in this package, e.g. \"RequestMirror\". Implementers are encouraged to support extended filters. \n - Implementation-specific: Filters that are defined and supported by specific vendors. In the future, filters showing convergence in behavior across multiple implementations will be considered for inclusion in extended or core conformance levels. Filter-specific configuration for such filters is specified using the ExtensionRef field. `Type` MUST be set to \"ExtensionRef\" for custom filters. \n Implementers are encouraged to define custom implementation types to extend the core API with implementation-specific behavior. \n If a reference to a custom filter type cannot be resolved, the filter MUST NOT be skipped. Instead, requests that would have been processed by that filter MUST receive a HTTP error response. \n "
enum:
- ResponseHeaderModifier
- RequestHeaderModifier
- RequestMirror
- ExtensionRef
type: string
required:
- type
type: object
x-kubernetes-validations:
- message: filter.requestHeaderModifier must be nil if the filter.type is not RequestHeaderModifier
rule: '!(has(self.requestHeaderModifier) && self.type != ''RequestHeaderModifier'')'
- message: filter.requestHeaderModifier must be specified for RequestHeaderModifier filter.type
rule: '!(!has(self.requestHeaderModifier) && self.type == ''RequestHeaderModifier'')'
- message: filter.responseHeaderModifier must be nil if the filter.type is not ResponseHeaderModifier
rule: '!(has(self.responseHeaderModifier) && self.type != ''ResponseHeaderModifier'')'
- message: filter.responseHeaderModifier must be specified for ResponseHeaderModifier filter.type
rule: '!(!has(self.responseHeaderModifier) && self.type == ''ResponseHeaderModifier'')'
- message: filter.requestMirror must be nil if the filter.type is not RequestMirror
rule: '!(has(self.requestMirror) && self.type != ''RequestMirror'')'
- message: filter.requestMirror must be specified for RequestMirror filter.type
rule: '!(!has(self.requestMirror) && self.type == ''RequestMirror'')'
- message: filter.extensionRef must be nil if the filter.type is not ExtensionRef
rule: '!(has(self.extensionRef) && self.type != ''ExtensionRef'')'
- message: filter.extensionRef must be specified for ExtensionRef filter.type
rule: '!(!has(self.extensionRef) && self.type == ''ExtensionRef'')'
maxItems: 16
type: array
x-kubernetes-validations:
- message: RequestHeaderModifier filter cannot be repeated
rule: self.filter(f, f.type == 'RequestHeaderModifier').size() <= 1
- message: ResponseHeaderModifier filter cannot be repeated
rule: self.filter(f, f.type == 'ResponseHeaderModifier').size() <= 1
matches:
description: "Matches define conditions used for matching the rule against incoming gRPC requests. Each match is independent, i.e. this rule will be matched if **any** one of the matches is satisfied. \n For example, take the following matches configuration: \n ``` matches: - method: service: foo.bar headers: values: version: 2 - method: service: foo.bar.v2 ``` \n For a request to match against this rule, it MUST satisfy EITHER of the two conditions: \n - service of foo.bar AND contains the header `version: 2` - service of foo.bar.v2 \n See the documentation for GRPCRouteMatch on how to specify multiple match conditions to be ANDed together. \n If no matches are specified, the implementation MUST match every gRPC request. \n Proxy or Load Balancer routing configuration generated from GRPCRoutes MUST prioritize rules based on the following criteria, continuing on ties. Merging MUST not be done between GRPCRoutes and HTTPRoutes. Precedence MUST be given to the rule with the largest number of: \n * Characters in a matching non-wildcard hostname. * Characters in a matching hostname. * Characters in a matching service. * Characters in a matching method. * Header matches. \n If ties still exist across multiple Routes, matching precedence MUST be determined in order of the following criteria, continuing on ties: \n * The oldest Route based on creation timestamp. * The Route appearing first in alphabetical order by \"{namespace}/{name}\". \n If ties still exist within the Route that has been given precedence, matching precedence MUST be granted to the first matching rule meeting the above criteria."
items:
description: "GRPCRouteMatch defines the predicate used to match requests to a given action. Multiple match types are ANDed together, i.e. the match will evaluate to true only if all conditions are satisfied. \n For example, the match below will match a gRPC request only if its service is `foo` AND it contains the `version: v1` header: \n ``` matches: - method: type: Exact service: \"foo\" headers: - name: \"version\" value \"v1\" \n ```"
properties:
headers:
description: Headers specifies gRPC request header matchers. Multiple match values are ANDed together, meaning, a request MUST match all the specified headers to select the route.
items:
description: GRPCHeaderMatch describes how to select a gRPC route by matching gRPC request headers.
properties:
name:
description: "Name is the name of the gRPC Header to be matched. \n If multiple entries specify equivalent header names, only the first entry with an equivalent name MUST be considered for a match. Subsequent entries with an equivalent header name MUST be ignored. Due to the case-insensitivity of header names, \"foo\" and \"Foo\" are considered equivalent."
maxLength: 256
minLength: 1
pattern: ^[A-Za-z0-9!#$%&'*+\-.^_\x60|~]+$
type: string
type:
default: Exact
description: Type specifies how to match against the value of the header.
enum:
- Exact
- RegularExpression
type: string
value:
description: Value is the value of the gRPC Header to be matched.
maxLength: 4096
minLength: 1
type: string
required:
- name
- value
type: object
maxItems: 16
type: array
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
method:
description: Method specifies a gRPC request service/method matcher. If this field is not specified, all services and methods will match.
properties:
method:
description: "Value of the method to match against. If left empty or omitted, will match all services. \n At least one of Service and Method MUST be a non-empty string."
maxLength: 1024
type: string
service:
description: "Value of the service to match against. If left empty or omitted, will match any service. \n At least one of Service and Method MUST be a non-empty string."
maxLength: 1024
type: string
type:
default: Exact
description: "Type specifies how to match against the service and/or method. Support: Core (Exact with service and method specified) \n Support: Implementation-specific (Exact with method specified but no service specified) \n Support: Implementation-specific (RegularExpression)"
enum:
- Exact
- RegularExpression
type: string
type: object
x-kubernetes-validations:
- message: One or both of 'service' or 'method' must be specified
rule: 'has(self.type) ? has(self.service) || has(self.method) : true'
- message: service must only contain valid characters (matching ^(?i)\.?[a-z_][a-z_0-9]*(\.[a-z_][a-z_0-9]*)*$)
rule: '(!has(self.type) || self.type == ''Exact'') && has(self.service) ? self.service.matches(r"""^(?i)\.?[a-z_][a-z_0-9]*(\.[a-z_][a-z_0-9]*)*$"""): true'
- message: method must only contain valid characters (matching ^[A-Za-z_][A-Za-z_0-9]*$)
rule: '(!has(self.type) || self.type == ''Exact'') && has(self.method) ? self.method.matches(r"""^[A-Za-z_][A-Za-z_0-9]*$"""): true'
type: object
maxItems: 8
type: array
type: object
maxItems: 16
type: array
type: object
status:
description: Status defines the current state of GRPCRoute.
properties:
parents:
description: "Parents is a list of parent resources (usually Gateways) that are associated with the route, and the status of the route with respect to each parent. When this route attaches to a parent, the controller that manages the parent must add an entry to this list when the controller first sees the route and should update the entry as appropriate when the route or gateway is modified. \n Note that parent references that cannot be resolved by an implementation of this API will not be added to this list. Implementations of this API can only populate Route status for the Gateways/parent resources they are responsible for. \n A maximum of 32 Gateways will be represented in this list. An empty list means the route has not been attached to any Gateway."
items:
description: RouteParentStatus describes the status of a route with respect to an associated Parent.
properties:
conditions:
description: "Conditions describes the status of the route with respect to the Gateway. Note that the route's availability is also subject to the Gateway's own status conditions and listener status. \n If the Route's ParentRef specifies an existing Gateway that supports Routes of this kind AND that Gateway's controller has sufficient access, then that Gateway's controller MUST set the \"Accepted\" condition on the Route, to indicate whether the route has been accepted or rejected by the Gateway, and why. \n A Route MUST be considered \"Accepted\" if at least one of the Route's rules is implemented by the Gateway. \n There are a number of cases where the \"Accepted\" condition may not be set due to lack of controller visibility, that includes when: \n * The Route refers to a non-existent parent. * The Route is of a type that the controller does not support. * The Route is in a namespace the controller does not have access to."
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
minItems: 1
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
controllerName:
description: "ControllerName is a domain/path string that indicates the name of the controller that wrote this status. This corresponds with the controllerName field on GatewayClass. \n Example: \"example.net/gateway-controller\". \n The format of this field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). \n Controllers MUST populate this field when writing status. Controllers should ensure that entries to status populated with their ControllerName are cleaned up when they are no longer necessary."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
parentRef:
description: ParentRef corresponds with a ParentRef in the spec that this RouteParentStatus struct describes the status of.
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- name
type: object
required:
- controllerName
- parentRef
type: object
maxItems: 32
type: array
required:
- parents
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null
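
For orientation, a minimal GRPCRoute manifest that exercises the schema above might look like the sketch below. It is not part of this changeset; the `grpc` namespace, the `echo-gateway` Gateway, the `echo-svc` Service, and the `grpc.example.EchoService` service name are all hypothetical, and with the experimental channel of Gateway API v1.0.0 the resource is served as v1alpha2.

# Hypothetical example (not part of this changeset): route the Echo method
# of grpc.example.EchoService to a backend Service, adding a request header.
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: GRPCRoute
metadata:
  name: echo-route
  namespace: grpc
spec:
  parentRefs:
    - name: echo-gateway              # Gateway assumed to exist in the same namespace
  rules:
    - matches:
        - method:
            type: Exact
            service: grpc.example.EchoService
            method: Echo
      filters:
        - type: RequestHeaderModifier # core filter; may appear at most once per rule
          requestHeaderModifier:
            set:
              - name: x-route
                value: echo
      backendRefs:
        - name: echo-svc              # Kubernetes Service; port is required for Service refs
          port: 9000
          weight: 1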


@@ -0,0 +1,205 @@
#
# config/crd/experimental/gateway.networking.k8s.io_referencegrants.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: referencegrants.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: ReferenceGrant
listKind: ReferenceGrantList
plural: referencegrants
shortNames:
- refgrant
singular: referencegrant
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: The v1alpha2 version of ReferenceGrant has been deprecated and will be removed in a future release of the API. Please upgrade to v1beta1.
name: v1alpha2
schema:
openAPIV3Schema:
description: "ReferenceGrant identifies kinds of resources in other namespaces that are trusted to reference the specified kinds of resources in the same namespace as the policy. \n Each ReferenceGrant can be used to represent a unique trust relationship. Additional Reference Grants can be used to add to the set of trusted sources of inbound references for the namespace they are defined within. \n A ReferenceGrant is required for all cross-namespace references in Gateway API (with the exception of cross-namespace Route-Gateway attachment, which is governed by the AllowedRoutes configuration on the Gateway, and cross-namespace Service ParentRefs on a \"consumer\" mesh Route, which defines routing rules applicable only to workloads in the Route namespace). ReferenceGrants allowing a reference from a Route to a Service are only applicable to BackendRefs. \n ReferenceGrant is a form of runtime verification allowing users to assert which cross-namespace object references are permitted. Implementations that support ReferenceGrant MUST NOT permit cross-namespace references which have no grant, and MUST respond to the removal of a grant by revoking the access that the grant allowed."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of ReferenceGrant.
properties:
from:
description: "From describes the trusted namespaces and kinds that can reference the resources described in \"To\". Each entry in this list MUST be considered to be an additional place that references can be valid from, or to put this another way, entries MUST be combined using OR. \n Support: Core"
items:
description: ReferenceGrantFrom describes trusted namespaces and kinds.
properties:
group:
description: "Group is the group of the referent. When empty, the Kubernetes core API group is inferred. \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: "Kind is the kind of the referent. Although implementations may support additional resources, the following types are part of the \"Core\" support level for this field. \n When used to permit a SecretObjectReference: \n * Gateway \n When used to permit a BackendObjectReference: \n * GRPCRoute * HTTPRoute * TCPRoute * TLSRoute * UDPRoute"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
namespace:
description: "Namespace is the namespace of the referent. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- namespace
type: object
maxItems: 16
minItems: 1
type: array
to:
description: "To describes the resources that may be referenced by the resources described in \"From\". Each entry in this list MUST be considered to be an additional place that references can be valid to, or to put this another way, entries MUST be combined using OR. \n Support: Core"
items:
description: ReferenceGrantTo describes what Kinds are allowed as targets of the references.
properties:
group:
description: "Group is the group of the referent. When empty, the Kubernetes core API group is inferred. \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: "Kind is the kind of the referent. Although implementations may support additional resources, the following types are part of the \"Core\" support level for this field: \n * Secret when used to permit a SecretObjectReference * Service when used to permit a BackendObjectReference"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent. When unspecified, this policy refers to all resources of the specified Group and Kind in the local namespace.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
type: object
maxItems: 16
minItems: 1
type: array
required:
- from
- to
type: object
type: object
served: true
storage: false
subresources: {}
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1beta1
schema:
openAPIV3Schema:
description: "ReferenceGrant identifies kinds of resources in other namespaces that are trusted to reference the specified kinds of resources in the same namespace as the policy. \n Each ReferenceGrant can be used to represent a unique trust relationship. Additional Reference Grants can be used to add to the set of trusted sources of inbound references for the namespace they are defined within. \n All cross-namespace references in Gateway API (with the exception of cross-namespace Gateway-route attachment) require a ReferenceGrant. \n ReferenceGrant is a form of runtime verification allowing users to assert which cross-namespace object references are permitted. Implementations that support ReferenceGrant MUST NOT permit cross-namespace references which have no grant, and MUST respond to the removal of a grant by revoking the access that the grant allowed."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of ReferenceGrant.
properties:
from:
description: "From describes the trusted namespaces and kinds that can reference the resources described in \"To\". Each entry in this list MUST be considered to be an additional place that references can be valid from, or to put this another way, entries MUST be combined using OR. \n Support: Core"
items:
description: ReferenceGrantFrom describes trusted namespaces and kinds.
properties:
group:
description: "Group is the group of the referent. When empty, the Kubernetes core API group is inferred. \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: "Kind is the kind of the referent. Although implementations may support additional resources, the following types are part of the \"Core\" support level for this field. \n When used to permit a SecretObjectReference: \n * Gateway \n When used to permit a BackendObjectReference: \n * GRPCRoute * HTTPRoute * TCPRoute * TLSRoute * UDPRoute"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
namespace:
description: "Namespace is the namespace of the referent. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
required:
- group
- kind
- namespace
type: object
maxItems: 16
minItems: 1
type: array
to:
description: "To describes the resources that may be referenced by the resources described in \"From\". Each entry in this list MUST be considered to be an additional place that references can be valid to, or to put this another way, entries MUST be combined using OR. \n Support: Core"
items:
description: ReferenceGrantTo describes what Kinds are allowed as targets of the references.
properties:
group:
description: "Group is the group of the referent. When empty, the Kubernetes core API group is inferred. \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
description: "Kind is the kind of the referent. Although implementations may support additional resources, the following types are part of the \"Core\" support level for this field: \n * Secret when used to permit a SecretObjectReference * Service when used to permit a BackendObjectReference"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent. When unspecified, this policy refers to all resources of the specified Group and Kind in the local namespace.
maxLength: 253
minLength: 1
type: string
required:
- group
- kind
type: object
maxItems: 16
minItems: 1
type: array
required:
- from
- to
type: object
type: object
served: true
storage: true
subresources: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null
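
As a concrete illustration of the schema above, a minimal ReferenceGrant could allow HTTPRoutes in one namespace to reference Services in another. It is not part of this changeset, and the `apps` and `backends` namespaces are hypothetical.

# Hypothetical example (not part of this changeset): permit HTTPRoutes in the
# "apps" namespace to reference Services in the "backends" namespace.
apiVersion: gateway.networking.k8s.io/v1beta1
kind: ReferenceGrant
metadata:
  name: allow-apps-httproutes
  namespace: backends                 # created in the namespace that owns the referenced Services
spec:
  from:
    - group: gateway.networking.k8s.io
      kind: HTTPRoute
      namespace: apps
  to:
    - group: ""                       # core API group
      kind: Service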


@@ -1,10 +1,13 @@
---
#
# config/crd/experimental/gateway.networking.k8s.io_tcproutes.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/891
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: tcproutes.gateway.networking.k8s.io
spec:
@@ -25,19 +28,13 @@ spec:
name: v1alpha2
schema:
openAPIV3Schema:
description: TCPRoute provides a way to route TCP requests. When combined
with a Gateway listener, it can be used to forward connections on the port
specified by the listener to a set of backends specified by the TCPRoute.
description: TCPRoute provides a way to route TCP requests. When combined with a Gateway listener, it can be used to forward connections on the port specified by the listener to a set of backends specified by the TCPRoute.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -45,79 +42,42 @@ spec:
description: Spec defines the desired state of TCPRoute.
properties:
parentRefs:
description: "ParentRefs references the resources (usually Gateways)
that a Route wants to be attached to. Note that the referenced parent
resource needs to allow this for the attachment to be complete.
For Gateways, that means the Gateway needs to allow attachment from
Routes of this kind and namespace. \n The only kind of parent resource
with \"Core\" support is Gateway. This API may be extended in the
future to support additional kinds of parent resources such as one
of the route kinds. \n It is invalid to reference an identical parent
more than once. It is valid to reference multiple distinct sections
within the same parent resource, such as 2 Listeners within a Gateway.
\n It is possible to separately reference multiple distinct objects
that may be collapsed by an implementation. For example, some implementations
may choose to merge compatible Gateway Listeners together. If that
is the case, the list of routes attached to those resources should
also be merged."
description: "ParentRefs references the resources (usually Gateways) that a Route wants to be attached to. Note that the referenced parent resource needs to allow this for the attachment to be complete. For Gateways, that means the Gateway needs to allow attachment from Routes of this kind and namespace. For Services, that means the Service must either be in the same namespace for a \"producer\" route, or the mesh implementation must support and allow \"consumer\" routes for the referenced Service. ReferenceGrant is not applicable for governing ParentRefs to Services - it is not possible to create a \"producer\" route for a Service in a different namespace from the Route. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) This API may be extended in the future to support additional kinds of parent resources. \n ParentRefs must be _distinct_. This means either that: \n * They select different objects. If this is the case, then parentRef entries are distinct. In terms of fields, this means that the multi-part key defined by `group`, `kind`, `namespace`, and `name` must be unique across all parentRef entries in the Route. * They do not select different objects, but for each optional field used, each ParentRef that selects the same object must set the same set of optional fields to different values. If one ParentRef sets a combination of optional fields, all must set the same combination. \n Some examples: \n * If one ParentRef sets `sectionName`, all ParentRefs referencing the same object must also set `sectionName`. * If one ParentRef sets `port`, all ParentRefs referencing the same object must also set `port`. * If one ParentRef sets `sectionName` and `port`, all ParentRefs referencing the same object must also set `sectionName` and `port`. \n It is possible to separately reference multiple distinct objects that may be collapsed by an implementation. For example, some implementations may choose to merge compatible Gateway Listeners together. If that is the case, the list of routes attached to those resources should also be merged. \n Note that for ParentRefs that cross namespace boundaries, there are specific rules. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example, Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable other kinds of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n "
items:
description: "ParentRef identifies an API object (usually a Gateway)
that can be considered a parent of this resource (usually a route).
The only kind of parent resource with \"Core\" support is Gateway.
This API may be extended in the future to support additional kinds
of parent resources, such as HTTPRoute. \n The API object must
be valid in the cluster; the Group and Kind must be registered
in the cluster for this reference to be valid. \n References to
objects with invalid Group and Kind are not valid, and must be
rejected by the implementation, with appropriate Conditions set
on the containing object."
description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. \n Support:
Core"
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n Support: Core
(Gateway) Support: Custom (Other Resources)"
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support:
Core"
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When
unspecified (or empty string), this refers to the local namespace
of the Route. \n Support: Core"
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the
target resource. In the following resources, SectionName is
interpreted as the following: \n * Gateway: Listener Name
\n Implementations MAY choose to support attaching Routes
to other resources. If that is the case, they MUST clearly
document how SectionName is interpreted. \n When unspecified
(empty string), this will reference the entire resource. For
the purpose of status, an attachment is considered successful
if at least one section in the parent resource accepts it.
For example, Gateway listeners can restrict which Routes can
attach to them by Route kind, namespace, or hostname. If 1
of 2 Gateway listeners accept attachment from the referencing
Route, the Route MUST be considered successfully attached.
If no Gateway listeners accept attachment from this Route,
the Route MUST be considered detached from the Gateway. \n
Support: Core"
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
@@ -127,41 +87,30 @@ spec:
type: object
maxItems: 32
type: array
x-kubernetes-validations:
- message: sectionName or port must be specified when parentRefs includes 2 or more references to the same parent
rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) || p2.port == 0)): true))'
- message: sectionName or port must be unique when parentRefs includes 2 or more references to the same parent
rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port == p2.port))))
rules:
description: Rules are a list of TCP matchers and actions.
items:
description: TCPRouteRule is the configuration for a given rule.
properties:
backendRefs:
description: "BackendRefs defines the backend(s) where matching
requests should be sent. If unspecified or invalid (refers
to a non-existent resource or a Service with no endpoints),
the underlying implementation MUST actively reject connection
attempts to this backend. Connection rejections must respect
weight; if an invalid backend is requested to have 80% of
connections, then 80% of connections must be rejected instead.
\n Support: Core for Kubernetes Service Support: Custom for
any other resource \n Support for weight: Extended"
description: "BackendRefs defines the backend(s) where matching requests should be sent. If unspecified or invalid (refers to a non-existent resource or a Service with no endpoints), the underlying implementation MUST actively reject connection attempts to this backend. Connection rejections must respect weight; if an invalid backend is requested to have 80% of connections, then 80% of connections must be rejected instead. \n Support: Core for Kubernetes Service \n Support: Extended for Kubernetes ServiceImport \n Support: Implementation-specific for any other resource \n Support for weight: Extended"
items:
description: "BackendRef defines how a Route should forward
a request to a Kubernetes resource. \n Note that when a
namespace is specified, a ReferencePolicy object is required
in the referent namespace to allow that namespace's owner
to accept the reference. See the ReferencePolicy documentation
for details."
description: "BackendRef defines how a Route should forward a request to a Kubernetes resource. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n <gateway:experimental:description> \n When the BackendRef points to a Kubernetes Service, implementations SHOULD honor the appProtocol field if it is set for the target Service Port. \n Implementations supporting appProtocol SHOULD recognize the Kubernetes Standard Application Protocols defined in KEP-3726. \n If a Service appProtocol isn't specified, an implementation MAY infer the backend protocol through its own means. Implementations MAY infer the protocol from the Route type referring to the backend Service. \n If a Route is not able to send traffic to the backend using the specified protocol then the backend is considered invalid. Implementations MUST set the \"ResolvedRefs\" condition to \"False\" with the \"UnsupportedProtocol\" reason. \n </gateway:experimental:description> \n Note that when the BackendTLSPolicy object is enabled by the implementation, there are some extra rules about validity to consider here. See the fields where this struct is used for more information about the exact behavior."
properties:
group:
default: ""
description: Group is the group of the referent. For example,
"networking.k8s.io". When unspecified (empty string),
core API group is inferred.
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: Kind is kind of the referent. For example
"HTTPRoute" or "Service".
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
@@ -172,42 +121,20 @@ spec:
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend.
When unspecified, the local namespace is inferred. \n
Note that when a namespace is specified, a ReferencePolicy
object is required in the referent namespace to allow
that namespace's owner to accept the reference. See
the ReferencePolicy documentation for details. \n Support:
Core"
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number
to use for this resource. Port is required when the
referent is a Kubernetes Service. For other resources,
destination port might be derived from the referent
resource or this field.
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
weight:
default: 1
description: "Weight specifies the proportion of requests
forwarded to the referenced backend. This is computed
as weight/(sum of all weights in this BackendRefs list).
For non-zero values, there may be some epsilon from
the exact proportion defined here depending on the precision
an implementation supports. Weight is not a percentage
and the sum of weights does not need to equal 100. \n
If only one backend is specified and it has a weight
greater than 0, 100% of the traffic is forwarded to
that backend. If weight is set to 0, no traffic should
be forwarded for this entry. If unspecified, weight
defaults to 1. \n Support for this field varies based
on the context where used."
description: "Weight specifies the proportion of requests forwarded to the referenced backend. This is computed as weight/(sum of all weights in this BackendRefs list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. Weight is not a percentage and the sum of weights does not need to equal 100. \n If only one backend is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. If weight is set to 0, no traffic should be forwarded for this entry. If unspecified, weight defaults to 1. \n Support for this field varies based on the context where used."
format: int32
maximum: 1000000
minimum: 0
@@ -215,6 +142,9 @@ spec:
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
maxItems: 16
minItems: 1
type: array
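
As a concrete sketch of the backendRefs shape described above (service names and port numbers are invented), a TCPRoute rule splitting connections roughly 80/20 between two backends could be written as below; port is set on both entries because the new x-kubernetes-validations rule requires it whenever the referent is a core Service:

rules:
  - backendRefs:
      - name: tcp-backend-v1     # group defaults to "" and kind to Service
        port: 9000               # required for a Service reference
        weight: 80
      - name: tcp-backend-v2
        port: 9000
        weight: 20               # proportion of connections, not a percentage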
@@ -229,100 +159,43 @@ spec:
description: Status defines the current state of TCPRoute.
properties:
parents:
description: "Parents is a list of parent resources (usually Gateways)
that are associated with the route, and the status of the route
with respect to each parent. When this route attaches to a parent,
the controller that manages the parent must add an entry to this
list when the controller first sees the route and should update
the entry as appropriate when the route or gateway is modified.
\n Note that parent references that cannot be resolved by an implementation
of this API will not be added to this list. Implementations of this
API can only populate Route status for the Gateways/parent resources
they are responsible for. \n A maximum of 32 Gateways will be represented
in this list. An empty list means the route has not been attached
to any Gateway."
description: "Parents is a list of parent resources (usually Gateways) that are associated with the route, and the status of the route with respect to each parent. When this route attaches to a parent, the controller that manages the parent must add an entry to this list when the controller first sees the route and should update the entry as appropriate when the route or gateway is modified. \n Note that parent references that cannot be resolved by an implementation of this API will not be added to this list. Implementations of this API can only populate Route status for the Gateways/parent resources they are responsible for. \n A maximum of 32 Gateways will be represented in this list. An empty list means the route has not been attached to any Gateway."
items:
description: RouteParentStatus describes the status of a route with
respect to an associated Parent.
description: RouteParentStatus describes the status of a route with respect to an associated Parent.
properties:
conditions:
description: "Conditions describes the status of the route with
respect to the Gateway. Note that the route's availability
is also subject to the Gateway's own status conditions and
listener status. \n If the Route's ParentRef specifies an
existing Gateway that supports Routes of this kind AND that
Gateway's controller has sufficient access, then that Gateway's
controller MUST set the \"Accepted\" condition on the Route,
to indicate whether the route has been accepted or rejected
by the Gateway, and why. \n A Route MUST be considered \"Accepted\"
if at least one of the Route's rules is implemented by the
Gateway. \n There are a number of cases where the \"Accepted\"
condition may not be set due to lack of controller visibility,
that includes when: \n * The Route refers to a non-existent
parent. * The Route is of a type that the controller does
not support. * The Route is in a namespace the the controller
does not have access to."
description: "Conditions describes the status of the route with respect to the Gateway. Note that the route's availability is also subject to the Gateway's own status conditions and listener status. \n If the Route's ParentRef specifies an existing Gateway that supports Routes of this kind AND that Gateway's controller has sufficient access, then that Gateway's controller MUST set the \"Accepted\" condition on the Route, to indicate whether the route has been accepted or rejected by the Gateway, and why. \n A Route MUST be considered \"Accepted\" if at least one of the Route's rules is implemented by the Gateway. \n There are a number of cases where the \"Accepted\" condition may not be set due to lack of controller visibility, that includes when: \n * The Route refers to a non-existent parent. * The Route is of a type that the controller does not support. * The Route is in a namespace the controller does not have access to."
items:
description: "Condition contains details for one aspect of
the current state of this API Resource. --- This struct
is intended for direct use as an array at the field path
.status.conditions. For example, type FooStatus struct{
\ // Represents the observations of a foo's current state.
\ // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map //
+listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should
be when the underlying condition changed. If that is
not known, then using the time when the API field changed
is acceptable.
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the
current state of the instance.
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last transition.
Producers of specific condition types may define expected
values and meanings for this field, and whether the
values are considered a guaranteed API. The value should
be a CamelCase string. This field may not be empty.
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False,
Unknown.
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across
resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability
to deconflict is important. The regex it matches is
(dns1123SubdomainFmt/)?(qualifiedNameFmt)
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
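
For orientation, a single populated entry under status.parents might look roughly like the following (controller identity, generation and timestamp are invented; only the controllerName example value is taken from the schema itself):

parents:
  - parentRef:
      group: gateway.networking.k8s.io
      kind: Gateway
      name: my-gateway                    # illustrative parent
      sectionName: tcp
    controllerName: example.net/gateway-controller
    conditions:
      - type: Accepted
        status: "True"
        reason: Accepted
        message: Route accepted by the listener
        observedGeneration: 1
        lastTransitionTime: "2024-01-09T10:00:00Z"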
@@ -340,66 +213,46 @@ spec:
- type
x-kubernetes-list-type: map
controllerName:
description: "ControllerName is a domain/path string that indicates
the name of the controller that wrote this status. This corresponds
with the controllerName field on GatewayClass. \n Example:
\"example.net/gateway-controller\". \n The format of this
field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid
Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names)."
description: "ControllerName is a domain/path string that indicates the name of the controller that wrote this status. This corresponds with the controllerName field on GatewayClass. \n Example: \"example.net/gateway-controller\". \n The format of this field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). \n Controllers MUST populate this field when writing status. Controllers should ensure that entries to status populated with their ControllerName are cleaned up when they are no longer necessary."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
parentRef:
description: ParentRef corresponds with a ParentRef in the spec
that this RouteParentStatus struct describes the status of.
description: ParentRef corresponds with a ParentRef in the spec that this RouteParentStatus struct describes the status of.
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. \n Support:
Core"
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n Support:
Core (Gateway) Support: Custom (Other Resources)"
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support:
Core"
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent.
When unspecified (or empty string), this refers to the
local namespace of the Route. \n Support: Core"
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within
the target resource. In the following resources, SectionName
is interpreted as the following: \n * Gateway: Listener
Name \n Implementations MAY choose to support attaching
Routes to other resources. If that is the case, they MUST
clearly document how SectionName is interpreted. \n When
unspecified (empty string), this will reference the entire
resource. For the purpose of status, an attachment is
considered successful if at least one section in the parent
resource accepts it. For example, Gateway listeners can
restrict which Routes can attach to them by Route kind,
namespace, or hostname. If 1 of 2 Gateway listeners accept
attachment from the referencing Route, the Route MUST
be considered successfully attached. If no Gateway listeners
accept attachment from this Route, the Route MUST be considered
detached from the Gateway. \n Support: Core"
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
@@ -427,5 +280,5 @@ status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
conditions: null
storedVersions: null
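
The long ParentRefs description and the two CEL validations above reduce to a simple rule of thumb: several entries may target the same Gateway only if they are disambiguated consistently, for example all of them via sectionName. A hedged sketch, with gateway, listener and backend names as placeholders:

apiVersion: gateway.networking.k8s.io/v1alpha2
kind: TCPRoute
metadata:
  name: db-route                 # illustrative
  namespace: apps
spec:
  parentRefs:
    - name: shared-gateway
      sectionName: tcp-primary   # both refs set sectionName with different
    - name: shared-gateway       # values, so the pair passes both CEL rules
      sectionName: tcp-replica
  rules:
    - backendRefs:
        - name: postgres
          port: 5432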


@@ -1,10 +1,13 @@
---
#
# config/crd/experimental/gateway.networking.k8s.io_tlsroutes.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/891
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: tlsroutes.gateway.networking.k8s.io
spec:
@@ -25,21 +28,13 @@ spec:
name: v1alpha2
schema:
openAPIV3Schema:
description: "The TLSRoute resource is similar to TCPRoute, but can be configured
to match against TLS-specific metadata. This allows more flexibility in
matching streams for a given TLS listener. \n If you need to forward traffic
to a single target for a TLS listener, you could choose to use a TCPRoute
with a TLS listener."
description: "The TLSRoute resource is similar to TCPRoute, but can be configured to match against TLS-specific metadata. This allows more flexibility in matching streams for a given TLS listener. \n If you need to forward traffic to a single target for a TLS listener, you could choose to use a TCPRoute with a TLS listener."
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -47,43 +42,9 @@ spec:
description: Spec defines the desired state of TLSRoute.
properties:
hostnames:
description: "Hostnames defines a set of SNI names that should match
against the SNI attribute of TLS ClientHello message in TLS handshake.
This matches the RFC 1123 definition of a hostname with 2 notable
exceptions: \n 1. IPs are not allowed in SNI names per RFC 6066.
2. A hostname may be prefixed with a wildcard label (`*.`). The
wildcard label must appear by itself as the first label. \n If
a hostname is specified by both the Listener and TLSRoute, there
must be at least one intersecting hostname for the TLSRoute to be
attached to the Listener. For example: \n * A Listener with `test.example.com`
as the hostname matches TLSRoutes that have either not specified
any hostnames, or have specified at least one of `test.example.com`
or `*.example.com`. * A Listener with `*.example.com` as the hostname
matches TLSRoutes that have either not specified any hostnames
or have specified at least one hostname that matches the Listener
hostname. For example, `test.example.com` and `*.example.com`
would both match. On the other hand, `example.com` and `test.example.net`
would not match. \n If both the Listener and TLSRoute have specified
hostnames, any TLSRoute hostnames that do not match the Listener
hostname MUST be ignored. For example, if a Listener specified `*.example.com`,
and the TLSRoute specified `test.example.com` and `test.example.net`,
`test.example.net` must not be considered for a match. \n If both
the Listener and TLSRoute have specified hostnames, and none match
with the criteria above, then the TLSRoute is not accepted. The
implementation must raise an 'Accepted' Condition with a status
of `False` in the corresponding RouteParentStatus. \n Support: Core"
description: "Hostnames defines a set of SNI names that should match against the SNI attribute of TLS ClientHello message in TLS handshake. This matches the RFC 1123 definition of a hostname with 2 notable exceptions: \n 1. IPs are not allowed in SNI names per RFC 6066. 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard label must appear by itself as the first label. \n If a hostname is specified by both the Listener and TLSRoute, there must be at least one intersecting hostname for the TLSRoute to be attached to the Listener. For example: \n * A Listener with `test.example.com` as the hostname matches TLSRoutes that have either not specified any hostnames, or have specified at least one of `test.example.com` or `*.example.com`. * A Listener with `*.example.com` as the hostname matches TLSRoutes that have either not specified any hostnames or have specified at least one hostname that matches the Listener hostname. For example, `test.example.com` and `*.example.com` would both match. On the other hand, `example.com` and `test.example.net` would not match. \n If both the Listener and TLSRoute have specified hostnames, any TLSRoute hostnames that do not match the Listener hostname MUST be ignored. For example, if a Listener specified `*.example.com`, and the TLSRoute specified `test.example.com` and `test.example.net`, `test.example.net` must not be considered for a match. \n If both the Listener and TLSRoute have specified hostnames, and none match with the criteria above, then the TLSRoute is not accepted. The implementation must raise an 'Accepted' Condition with a status of `False` in the corresponding RouteParentStatus. \n Support: Core"
items:
description: "Hostname is the fully qualified domain name of a network
host. This matches the RFC 1123 definition of a hostname with
2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname
may be prefixed with a wildcard label (`*.`). The wildcard label
must appear by itself as the first label. \n Hostname can be \"precise\"
which is a domain name without the terminating dot of a network
host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain
name prefixed with a single wildcard label (e.g. `*.example.com`).
\n Note that as per RFC1035 and RFC1123, a *label* must consist
of lower case alphanumeric characters or '-', and must start and
end with an alphanumeric character. No other punctuation is allowed."
description: "Hostname is the fully qualified domain name of a network host. This matches the RFC 1123 definition of a hostname with 2 notable exceptions: \n 1. IPs are not allowed. 2. A hostname may be prefixed with a wildcard label (`*.`). The wildcard label must appear by itself as the first label. \n Hostname can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.example.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. `*.example.com`). \n Note that as per RFC1035 and RFC1123, a *label* must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character. No other punctuation is allowed."
maxLength: 253
minLength: 1
pattern: ^(\*\.)?[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
@@ -91,79 +52,42 @@ spec:
maxItems: 16
type: array
parentRefs:
description: "ParentRefs references the resources (usually Gateways)
that a Route wants to be attached to. Note that the referenced parent
resource needs to allow this for the attachment to be complete.
For Gateways, that means the Gateway needs to allow attachment from
Routes of this kind and namespace. \n The only kind of parent resource
with \"Core\" support is Gateway. This API may be extended in the
future to support additional kinds of parent resources such as one
of the route kinds. \n It is invalid to reference an identical parent
more than once. It is valid to reference multiple distinct sections
within the same parent resource, such as 2 Listeners within a Gateway.
\n It is possible to separately reference multiple distinct objects
that may be collapsed by an implementation. For example, some implementations
may choose to merge compatible Gateway Listeners together. If that
is the case, the list of routes attached to those resources should
also be merged."
description: "ParentRefs references the resources (usually Gateways) that a Route wants to be attached to. Note that the referenced parent resource needs to allow this for the attachment to be complete. For Gateways, that means the Gateway needs to allow attachment from Routes of this kind and namespace. For Services, that means the Service must either be in the same namespace for a \"producer\" route, or the mesh implementation must support and allow \"consumer\" routes for the referenced Service. ReferenceGrant is not applicable for governing ParentRefs to Services - it is not possible to create a \"producer\" route for a Service in a different namespace from the Route. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) This API may be extended in the future to support additional kinds of parent resources. \n ParentRefs must be _distinct_. This means either that: \n * They select different objects. If this is the case, then parentRef entries are distinct. In terms of fields, this means that the multi-part key defined by `group`, `kind`, `namespace`, and `name` must be unique across all parentRef entries in the Route. * They do not select different objects, but for each optional field used, each ParentRef that selects the same object must set the same set of optional fields to different values. If one ParentRef sets a combination of optional fields, all must set the same combination. \n Some examples: \n * If one ParentRef sets `sectionName`, all ParentRefs referencing the same object must also set `sectionName`. * If one ParentRef sets `port`, all ParentRefs referencing the same object must also set `port`. * If one ParentRef sets `sectionName` and `port`, all ParentRefs referencing the same object must also set `sectionName` and `port`. \n It is possible to separately reference multiple distinct objects that may be collapsed by an implementation. For example, some implementations may choose to merge compatible Gateway Listeners together. If that is the case, the list of routes attached to those resources should also be merged. \n Note that for ParentRefs that cross namespace boundaries, there are specific rules. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example, Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable other kinds of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n "
items:
description: "ParentRef identifies an API object (usually a Gateway)
that can be considered a parent of this resource (usually a route).
The only kind of parent resource with \"Core\" support is Gateway.
This API may be extended in the future to support additional kinds
of parent resources, such as HTTPRoute. \n The API object must
be valid in the cluster; the Group and Kind must be registered
in the cluster for this reference to be valid. \n References to
objects with invalid Group and Kind are not valid, and must be
rejected by the implementation, with appropriate Conditions set
on the containing object."
description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. \n Support:
Core"
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n Support: Core
(Gateway) Support: Custom (Other Resources)"
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support:
Core"
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When
unspecified (or empty string), this refers to the local namespace
of the Route. \n Support: Core"
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the
target resource. In the following resources, SectionName is
interpreted as the following: \n * Gateway: Listener Name
\n Implementations MAY choose to support attaching Routes
to other resources. If that is the case, they MUST clearly
document how SectionName is interpreted. \n When unspecified
(empty string), this will reference the entire resource. For
the purpose of status, an attachment is considered successful
if at least one section in the parent resource accepts it.
For example, Gateway listeners can restrict which Routes can
attach to them by Route kind, namespace, or hostname. If 1
of 2 Gateway listeners accept attachment from the referencing
Route, the Route MUST be considered successfully attached.
If no Gateway listeners accept attachment from this Route,
the Route MUST be considered detached from the Gateway. \n
Support: Core"
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
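
Putting the hostnames and parentRefs fields above together, a minimal TLSRoute for SNI-based routing might look like this (hostname, gateway and backend names are illustrative, and the referenced listener is assumed to be a TLS listener in Passthrough mode):

apiVersion: gateway.networking.k8s.io/v1alpha2
kind: TLSRoute
metadata:
  name: secure-app               # illustrative
spec:
  hostnames:
    - app.example.com            # matched against the SNI of the ClientHello
  parentRefs:
    - name: my-gateway
      sectionName: tls           # name of the Gateway listener
  rules:
    - backendRefs:
        - name: secure-app-svc
          port: 8443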
@@ -173,44 +97,30 @@ spec:
type: object
maxItems: 32
type: array
x-kubernetes-validations:
- message: sectionName or port must be specified when parentRefs includes 2 or more references to the same parent
rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) || p2.port == 0)): true))'
- message: sectionName or port must be unique when parentRefs includes 2 or more references to the same parent
rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port == p2.port))))
rules:
description: Rules are a list of TLS matchers and actions.
items:
description: TLSRouteRule is the configuration for a given rule.
properties:
backendRefs:
description: "BackendRefs defines the backend(s) where matching
requests should be sent. If unspecified or invalid (refers
to a non-existent resource or a Service with no endpoints),
the rule performs no forwarding; if no filters are specified
that would result in a response being sent, the underlying
implementation must actively reject request attempts to this
backend, by rejecting the connection or returning a 503 status
code. Request rejections must respect weight; if an invalid
backend is requested to have 80% of requests, then 80% of
requests must be rejected instead. \n Support: Core for Kubernetes
Service Support: Custom for any other resource \n Support
for weight: Extended"
description: "BackendRefs defines the backend(s) where matching requests should be sent. If unspecified or invalid (refers to a non-existent resource or a Service with no endpoints), the rule performs no forwarding; if no filters are specified that would result in a response being sent, the underlying implementation must actively reject request attempts to this backend, by rejecting the connection or returning a 500 status code. Request rejections must respect weight; if an invalid backend is requested to have 80% of requests, then 80% of requests must be rejected instead. \n Support: Core for Kubernetes Service \n Support: Extended for Kubernetes ServiceImport \n Support: Implementation-specific for any other resource \n Support for weight: Extended"
items:
description: "BackendRef defines how a Route should forward
a request to a Kubernetes resource. \n Note that when a
namespace is specified, a ReferencePolicy object is required
in the referent namespace to allow that namespace's owner
to accept the reference. See the ReferencePolicy documentation
for details."
description: "BackendRef defines how a Route should forward a request to a Kubernetes resource. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n <gateway:experimental:description> \n When the BackendRef points to a Kubernetes Service, implementations SHOULD honor the appProtocol field if it is set for the target Service Port. \n Implementations supporting appProtocol SHOULD recognize the Kubernetes Standard Application Protocols defined in KEP-3726. \n If a Service appProtocol isn't specified, an implementation MAY infer the backend protocol through its own means. Implementations MAY infer the protocol from the Route type referring to the backend Service. \n If a Route is not able to send traffic to the backend using the specified protocol then the backend is considered invalid. Implementations MUST set the \"ResolvedRefs\" condition to \"False\" with the \"UnsupportedProtocol\" reason. \n </gateway:experimental:description> \n Note that when the BackendTLSPolicy object is enabled by the implementation, there are some extra rules about validity to consider here. See the fields where this struct is used for more information about the exact behavior."
properties:
group:
default: ""
description: Group is the group of the referent. For example,
"networking.k8s.io". When unspecified (empty string),
core API group is inferred.
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: Kind is kind of the referent. For example
"HTTPRoute" or "Service".
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
@ -221,42 +131,20 @@ spec:
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend.
When unspecified, the local namespace is inferred. \n
Note that when a namespace is specified, a ReferencePolicy
object is required in the referent namespace to allow
that namespace's owner to accept the reference. See
the ReferencePolicy documentation for details. \n Support:
Core"
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number
to use for this resource. Port is required when the
referent is a Kubernetes Service. For other resources,
destination port might be derived from the referent
resource or this field.
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
weight:
default: 1
description: "Weight specifies the proportion of requests
forwarded to the referenced backend. This is computed
as weight/(sum of all weights in this BackendRefs list).
For non-zero values, there may be some epsilon from
the exact proportion defined here depending on the precision
an implementation supports. Weight is not a percentage
and the sum of weights does not need to equal 100. \n
If only one backend is specified and it has a weight
greater than 0, 100% of the traffic is forwarded to
that backend. If weight is set to 0, no traffic should
be forwarded for this entry. If unspecified, weight
defaults to 1. \n Support for this field varies based
on the context where used."
description: "Weight specifies the proportion of requests forwarded to the referenced backend. This is computed as weight/(sum of all weights in this BackendRefs list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. Weight is not a percentage and the sum of weights does not need to equal 100. \n If only one backend is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. If weight is set to 0, no traffic should be forwarded for this entry. If unspecified, weight defaults to 1. \n Support for this field varies based on the context where used."
format: int32
maximum: 1000000
minimum: 0
@ -264,6 +152,9 @@ spec:
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
maxItems: 16
minItems: 1
type: array
@ -278,100 +169,43 @@ spec:
description: Status defines the current state of TLSRoute.
properties:
parents:
description: "Parents is a list of parent resources (usually Gateways)
that are associated with the route, and the status of the route
with respect to each parent. When this route attaches to a parent,
the controller that manages the parent must add an entry to this
list when the controller first sees the route and should update
the entry as appropriate when the route or gateway is modified.
\n Note that parent references that cannot be resolved by an implementation
of this API will not be added to this list. Implementations of this
API can only populate Route status for the Gateways/parent resources
they are responsible for. \n A maximum of 32 Gateways will be represented
in this list. An empty list means the route has not been attached
to any Gateway."
description: "Parents is a list of parent resources (usually Gateways) that are associated with the route, and the status of the route with respect to each parent. When this route attaches to a parent, the controller that manages the parent must add an entry to this list when the controller first sees the route and should update the entry as appropriate when the route or gateway is modified. \n Note that parent references that cannot be resolved by an implementation of this API will not be added to this list. Implementations of this API can only populate Route status for the Gateways/parent resources they are responsible for. \n A maximum of 32 Gateways will be represented in this list. An empty list means the route has not been attached to any Gateway."
items:
description: RouteParentStatus describes the status of a route with
respect to an associated Parent.
description: RouteParentStatus describes the status of a route with respect to an associated Parent.
properties:
conditions:
description: "Conditions describes the status of the route with
respect to the Gateway. Note that the route's availability
is also subject to the Gateway's own status conditions and
listener status. \n If the Route's ParentRef specifies an
existing Gateway that supports Routes of this kind AND that
Gateway's controller has sufficient access, then that Gateway's
controller MUST set the \"Accepted\" condition on the Route,
to indicate whether the route has been accepted or rejected
by the Gateway, and why. \n A Route MUST be considered \"Accepted\"
if at least one of the Route's rules is implemented by the
Gateway. \n There are a number of cases where the \"Accepted\"
condition may not be set due to lack of controller visibility,
that includes when: \n * The Route refers to a non-existent
parent. * The Route is of a type that the controller does
not support. * The Route is in a namespace the controller
does not have access to."
description: "Conditions describes the status of the route with respect to the Gateway. Note that the route's availability is also subject to the Gateway's own status conditions and listener status. \n If the Route's ParentRef specifies an existing Gateway that supports Routes of this kind AND that Gateway's controller has sufficient access, then that Gateway's controller MUST set the \"Accepted\" condition on the Route, to indicate whether the route has been accepted or rejected by the Gateway, and why. \n A Route MUST be considered \"Accepted\" if at least one of the Route's rules is implemented by the Gateway. \n There are a number of cases where the \"Accepted\" condition may not be set due to lack of controller visibility, that includes when: \n * The Route refers to a non-existent parent. * The Route is of a type that the controller does not support. * The Route is in a namespace the controller does not have access to."
items:
description: "Condition contains details for one aspect of
the current state of this API Resource. --- This struct
is intended for direct use as an array at the field path
.status.conditions. For example, type FooStatus struct{
\ // Represents the observations of a foo's current state.
\ // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map //
+listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should
be when the underlying condition changed. If that is
not known, then using the time when the API field changed
is acceptable.
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating
details about the transition. This may be an empty string.
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance,
if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the
current state of the instance.
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier
indicating the reason for the condition's last transition.
Producers of specific condition types may define expected
values and meanings for this field, and whether the
values are considered a guaranteed API. The value should
be a CamelCase string. This field may not be empty.
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False,
Unknown.
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across
resources like Available, but because arbitrary conditions
can be useful (see .node.status.conditions), the ability
to deconflict is important. The regex it matches is
(dns1123SubdomainFmt/)?(qualifiedNameFmt)
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@ -389,66 +223,46 @@ spec:
- type
x-kubernetes-list-type: map
controllerName:
description: "ControllerName is a domain/path string that indicates
the name of the controller that wrote this status. This corresponds
with the controllerName field on GatewayClass. \n Example:
\"example.net/gateway-controller\". \n The format of this
field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid
Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names)."
description: "ControllerName is a domain/path string that indicates the name of the controller that wrote this status. This corresponds with the controllerName field on GatewayClass. \n Example: \"example.net/gateway-controller\". \n The format of this field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). \n Controllers MUST populate this field when writing status. Controllers should ensure that entries to status populated with their ControllerName are cleaned up when they are no longer necessary."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
parentRef:
description: ParentRef corresponds with a ParentRef in the spec
that this RouteParentStatus struct describes the status of.
description: ParentRef corresponds with a ParentRef in the spec that this RouteParentStatus struct describes the status of.
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. \n Support:
Core"
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n Support:
Core (Gateway) Support: Custom (Other Resources)"
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support:
Core"
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent.
When unspecified (or empty string), this refers to the
local namespace of the Route. \n Support: Core"
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within
the target resource. In the following resources, SectionName
is interpreted as the following: \n * Gateway: Listener
Name \n Implementations MAY choose to support attaching
Routes to other resources. If that is the case, they MUST
clearly document how SectionName is interpreted. \n When
unspecified (empty string), this will reference the entire
resource. For the purpose of status, an attachment is
considered successful if at least one section in the parent
resource accepts it. For example, Gateway listeners can
restrict which Routes can attach to them by Route kind,
namespace, or hostname. If 1 of 2 Gateway listeners accept
attachment from the referencing Route, the Route MUST
be considered successfully attached. If no Gateway listeners
accept attachment from this Route, the Route MUST be considered
detached from the Gateway. \n Support: Core"
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
@ -476,5 +290,5 @@ status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
conditions: null
storedVersions: null
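The TLSRoute schema above is dense; a concrete manifest makes it easier to read. The following is a minimal sketch only, assuming a Gateway named `my-gateway` with TLS passthrough listeners named `tls-internal` and `tls-external`, and two backend Services in the same namespace; all names, hostnames, ports, and weights are illustrative.

```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: TLSRoute
metadata:
  name: tls-app
  namespace: default
spec:
  parentRefs:
    # Two references to the same Gateway must be distinguished by sectionName
    # (or port), per the parentRefs validation rules above.
    - name: my-gateway
      sectionName: tls-internal
    - name: my-gateway
      sectionName: tls-external
  hostnames:
    - whoami.example.com
  rules:
    - backendRefs:
        # Port is required for Service references; traffic is split
        # weight/(sum of weights), here roughly 75% / 25%.
        - name: whoami-v1
          port: 8443
          weight: 3
        - name: whoami-v2
          port: 8443
          weight: 1
```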


@ -0,0 +1,284 @@
#
# config/crd/experimental/gateway.networking.k8s.io_udproutes.yaml
#
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes-sigs/gateway-api/pull/2466
gateway.networking.k8s.io/bundle-version: v1.0.0
gateway.networking.k8s.io/channel: experimental
creationTimestamp: null
name: udproutes.gateway.networking.k8s.io
spec:
group: gateway.networking.k8s.io
names:
categories:
- gateway-api
kind: UDPRoute
listKind: UDPRouteList
plural: udproutes
singular: udproute
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1alpha2
schema:
openAPIV3Schema:
description: UDPRoute provides a way to route UDP traffic. When combined with a Gateway listener, it can be used to forward traffic on the port specified by the listener to a set of backends specified by the UDPRoute.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: Spec defines the desired state of UDPRoute.
properties:
parentRefs:
description: "ParentRefs references the resources (usually Gateways) that a Route wants to be attached to. Note that the referenced parent resource needs to allow this for the attachment to be complete. For Gateways, that means the Gateway needs to allow attachment from Routes of this kind and namespace. For Services, that means the Service must either be in the same namespace for a \"producer\" route, or the mesh implementation must support and allow \"consumer\" routes for the referenced Service. ReferenceGrant is not applicable for governing ParentRefs to Services - it is not possible to create a \"producer\" route for a Service in a different namespace from the Route. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) This API may be extended in the future to support additional kinds of parent resources. \n ParentRefs must be _distinct_. This means either that: \n * They select different objects. If this is the case, then parentRef entries are distinct. In terms of fields, this means that the multi-part key defined by `group`, `kind`, `namespace`, and `name` must be unique across all parentRef entries in the Route. * They do not select different objects, but for each optional field used, each ParentRef that selects the same object must set the same set of optional fields to different values. If one ParentRef sets a combination of optional fields, all must set the same combination. \n Some examples: \n * If one ParentRef sets `sectionName`, all ParentRefs referencing the same object must also set `sectionName`. * If one ParentRef sets `port`, all ParentRefs referencing the same object must also set `port`. * If one ParentRef sets `sectionName` and `port`, all ParentRefs referencing the same object must also set `sectionName` and `port`. \n It is possible to separately reference multiple distinct objects that may be collapsed by an implementation. For example, some implementations may choose to merge compatible Gateway Listeners together. If that is the case, the list of routes attached to those resources should also be merged. \n Note that for ParentRefs that cross namespace boundaries, there are specific rules. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example, Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable other kinds of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n "
items:
description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n This API may be extended in the future to support additional kinds of parent resources. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid."
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- name
type: object
maxItems: 32
type: array
x-kubernetes-validations:
- message: sectionName or port must be specified when parentRefs includes 2 or more references to the same parent
rule: 'self.all(p1, self.all(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '''') && (!has(p2.__namespace__) || p2.__namespace__ == '''')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__)) ? ((!has(p1.sectionName) || p1.sectionName == '''') == (!has(p2.sectionName) || p2.sectionName == '''') && (!has(p1.port) || p1.port == 0) == (!has(p2.port) || p2.port == 0)): true))'
- message: sectionName or port must be unique when parentRefs includes 2 or more references to the same parent
rule: self.all(p1, self.exists_one(p2, p1.group == p2.group && p1.kind == p2.kind && p1.name == p2.name && (((!has(p1.__namespace__) || p1.__namespace__ == '') && (!has(p2.__namespace__) || p2.__namespace__ == '')) || (has(p1.__namespace__) && has(p2.__namespace__) && p1.__namespace__ == p2.__namespace__ )) && (((!has(p1.sectionName) || p1.sectionName == '') && (!has(p2.sectionName) || p2.sectionName == '')) || ( has(p1.sectionName) && has(p2.sectionName) && p1.sectionName == p2.sectionName)) && (((!has(p1.port) || p1.port == 0) && (!has(p2.port) || p2.port == 0)) || (has(p1.port) && has(p2.port) && p1.port == p2.port))))
rules:
description: Rules are a list of UDP matchers and actions.
items:
description: UDPRouteRule is the configuration for a given rule.
properties:
backendRefs:
description: "BackendRefs defines the backend(s) where matching requests should be sent. If unspecified or invalid (refers to a non-existent resource or a Service with no endpoints), the underlying implementation MUST actively reject connection attempts to this backend. Packet drops must respect weight; if an invalid backend is requested to have 80% of the packets, then 80% of packets must be dropped instead. \n Support: Core for Kubernetes Service \n Support: Extended for Kubernetes ServiceImport \n Support: Implementation-specific for any other resource \n Support for weight: Extended"
items:
description: "BackendRef defines how a Route should forward a request to a Kubernetes resource. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n <gateway:experimental:description> \n When the BackendRef points to a Kubernetes Service, implementations SHOULD honor the appProtocol field if it is set for the target Service Port. \n Implementations supporting appProtocol SHOULD recognize the Kubernetes Standard Application Protocols defined in KEP-3726. \n If a Service appProtocol isn't specified, an implementation MAY infer the backend protocol through its own means. Implementations MAY infer the protocol from the Route type referring to the backend Service. \n If a Route is not able to send traffic to the backend using the specified protocol then the backend is considered invalid. Implementations MUST set the \"ResolvedRefs\" condition to \"False\" with the \"UnsupportedProtocol\" reason. \n </gateway:experimental:description> \n Note that when the BackendTLSPolicy object is enabled by the implementation, there are some extra rules about validity to consider here. See the fields where this struct is used for more information about the exact behavior."
properties:
group:
default: ""
description: Group is the group of the referent. For example, "gateway.networking.k8s.io". When unspecified or empty string, core API group is inferred.
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Service
description: "Kind is the Kubernetes resource kind of the referent. For example \"Service\". \n Defaults to \"Service\" when not specified. \n ExternalName services can refer to CNAME DNS records that may live outside of the cluster and as such are difficult to reason about in terms of conformance. They also may not be safe to forward to (see CVE-2021-25740 for more information). Implementations SHOULD NOT support ExternalName Services. \n Support: Core (Services with a type other than ExternalName) \n Support: Implementation-specific (Services with type ExternalName)"
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: Name is the name of the referent.
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the backend. When unspecified, the local namespace is inferred. \n Note that when a namespace different than the local namespace is specified, a ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: Port specifies the destination port number to use for this resource. Port is required when the referent is a Kubernetes Service. In this case, the port number is the service port number, not the target port. For other resources, destination port might be derived from the referent resource or this field.
format: int32
maximum: 65535
minimum: 1
type: integer
weight:
default: 1
description: "Weight specifies the proportion of requests forwarded to the referenced backend. This is computed as weight/(sum of all weights in this BackendRefs list). For non-zero values, there may be some epsilon from the exact proportion defined here depending on the precision an implementation supports. Weight is not a percentage and the sum of weights does not need to equal 100. \n If only one backend is specified and it has a weight greater than 0, 100% of the traffic is forwarded to that backend. If weight is set to 0, no traffic should be forwarded for this entry. If unspecified, weight defaults to 1. \n Support for this field varies based on the context where used."
format: int32
maximum: 1000000
minimum: 0
type: integer
required:
- name
type: object
x-kubernetes-validations:
- message: Must have port for Service reference
rule: '(size(self.group) == 0 && self.kind == ''Service'') ? has(self.port) : true'
maxItems: 16
minItems: 1
type: array
type: object
maxItems: 16
minItems: 1
type: array
required:
- rules
type: object
status:
description: Status defines the current state of UDPRoute.
properties:
parents:
description: "Parents is a list of parent resources (usually Gateways) that are associated with the route, and the status of the route with respect to each parent. When this route attaches to a parent, the controller that manages the parent must add an entry to this list when the controller first sees the route and should update the entry as appropriate when the route or gateway is modified. \n Note that parent references that cannot be resolved by an implementation of this API will not be added to this list. Implementations of this API can only populate Route status for the Gateways/parent resources they are responsible for. \n A maximum of 32 Gateways will be represented in this list. An empty list means the route has not been attached to any Gateway."
items:
description: RouteParentStatus describes the status of a route with respect to an associated Parent.
properties:
conditions:
description: "Conditions describes the status of the route with respect to the Gateway. Note that the route's availability is also subject to the Gateway's own status conditions and listener status. \n If the Route's ParentRef specifies an existing Gateway that supports Routes of this kind AND that Gateway's controller has sufficient access, then that Gateway's controller MUST set the \"Accepted\" condition on the Route, to indicate whether the route has been accepted or rejected by the Gateway, and why. \n A Route MUST be considered \"Accepted\" if at least one of the Route's rules is implemented by the Gateway. \n There are a number of cases where the \"Accepted\" condition may not be set due to lack of controller visibility, that includes when: \n * The Route refers to a non-existent parent. * The Route is of a type that the controller does not support. * The Route is in a namespace the controller does not have access to."
items:
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
maxItems: 8
minItems: 1
type: array
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
controllerName:
description: "ControllerName is a domain/path string that indicates the name of the controller that wrote this status. This corresponds with the controllerName field on GatewayClass. \n Example: \"example.net/gateway-controller\". \n The format of this field is DOMAIN \"/\" PATH, where DOMAIN and PATH are valid Kubernetes names (https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names). \n Controllers MUST populate this field when writing status. Controllers should ensure that entries to status populated with their ControllerName are cleaned up when they are no longer necessary."
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*\/[A-Za-z0-9\/\-._~%!$&'()*+,;=:]+$
type: string
parentRef:
description: ParentRef corresponds with a ParentRef in the spec that this RouteParentStatus struct describes the status of.
properties:
group:
default: gateway.networking.k8s.io
description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core"
maxLength: 253
pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
kind:
default: Gateway
description: "Kind is kind of the referent. \n There are two kinds of parent resources with \"Core\" support: \n * Gateway (Gateway conformance profile) * Service (Mesh conformance profile, experimental, ClusterIP Services only) \n Support for other resources is Implementation-Specific."
maxLength: 63
minLength: 1
pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$
type: string
name:
description: "Name is the name of the referent. \n Support: Core"
maxLength: 253
minLength: 1
type: string
namespace:
description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n ParentRefs from a Route to a Service in the same namespace are \"producer\" routes, which apply default routing rules to inbound connections from any namespace to the Service. \n ParentRefs from a Route to a Service in a different namespace are \"consumer\" routes, and these routing rules are only applied to outbound connections originating from the same namespace as the Route, for which the intended destination of the connections are a Service targeted as a ParentRef of the Route. \n Support: Core"
maxLength: 63
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$
type: string
port:
description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. \n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n When the parent resource is a Service, this targets a specific port in the Service spec. When both Port (experimental) and SectionName are specified, the name and port of the selected port must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n "
format: int32
maximum: 65535
minimum: 1
type: integer
sectionName:
description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. * Service: Port Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. Note that attaching Routes to Services as Parents is part of experimental Mesh support and is not supported for any other purpose. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core"
maxLength: 253
minLength: 1
pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
type: string
required:
- name
type: object
required:
- controllerName
- parentRef
type: object
maxItems: 32
type: array
required:
- parents
type: object
required:
- spec
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: null
storedVersions: null
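As with TLSRoute, a short manifest helps. This is a minimal sketch, assuming a Gateway named `my-gateway` that exposes a UDP listener named `dns` and a `coredns` Service in the same namespace; names and ports are illustrative.

```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
kind: UDPRoute
metadata:
  name: dns-app
  namespace: default
spec:
  parentRefs:
    # sectionName selects a single listener of the Gateway.
    - name: my-gateway
      sectionName: dns
  rules:
    - backendRefs:
        # Port is required because the referent is a Kubernetes Service.
        - name: coredns
          port: 53
```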


@ -1164,6 +1164,36 @@ spec:
description: 'IPAllowList holds the IP allowlist middleware configuration.
This middleware accepts / refuses requests based on the client IP.
More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
by Traefik to determine the client IP. More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/#ipstrategy'
properties:
depth:
description: Depth tells Traefik to use the X-Forwarded-For
header and take the IP located at the depth position (starting
from the right).
type: integer
excludedIPs:
description: ExcludedIPs configures Traefik to scan the X-Forwarded-For
header and select the first IP not in the list.
items:
type: string
type: array
type: object
rejectStatusCode:
description: RejectStatusCode defines the HTTP status code used
for refused requests. If not set, the default is 403 (Forbidden).
type: integer
sourceRange:
description: SourceRange defines the set of allowed IPs (or ranges
of allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
ipWhiteList:
description: 'Deprecated: please use IPAllowList instead.'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
@ -1524,6 +1554,17 @@ spec:
type: string
type: array
type: object
ipWhiteList:
description: 'IPWhiteList defines the IPWhiteList middleware configuration.
Deprecated: please use IPAllowList instead.'
properties:
sourceRange:
description: SourceRange defines the allowed IPs (or ranges of
allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
type: object
required:
- metadata
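To show how the `ipAllowList` options above fit together (including the new `rejectStatusCode`), here is a minimal sketch of an HTTP Middleware; the CIDR ranges, status code, and names are illustrative.

```yaml
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: internal-only
  namespace: default
spec:
  ipAllowList:
    # Only clients whose IP falls in these ranges are accepted.
    sourceRange:
      - 127.0.0.1/32
      - 192.168.0.0/16
    ipStrategy:
      # Use the IP at depth 1 (from the right) in X-Forwarded-For.
      depth: 1
    # Answer refused requests with 404 instead of the default 403.
    rejectStatusCode: 404
```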


@ -1,5 +1,5 @@
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
name: my-gateway-class
@ -7,7 +7,7 @@ spec:
controllerName: traefik.io/gateway-controller
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
@ -44,7 +44,7 @@ spec:
name: mysecret
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-app


@ -1,5 +1,5 @@
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
name: my-gateway-class
@ -8,7 +8,7 @@ spec:
controllerName: traefik.io/gateway-controller
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-gateway
@ -25,7 +25,7 @@ spec:
name: mysecret
---
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-app
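The hunks above only bump `apiVersion` from `gateway.networking.k8s.io/v1alpha2` to `gateway.networking.k8s.io/v1` for the resources that graduated in Gateway API v1.0.0. For reference, a complete minimal HTTPRoute under the new version could look like the sketch below; the Gateway, hostname, and Service names are illustrative.

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: http-app
  namespace: default
spec:
  parentRefs:
    - name: my-gateway
  hostnames:
    - whoami.example.com
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /
      backendRefs:
        - name: whoami
          port: 80
```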


@ -11,11 +11,15 @@ Dynamic configuration with Kubernetes Gateway provider.
## Definitions
```yaml
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_backendtlspolicies.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_gatewayclasses.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_gateways.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_grpcroutes.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_httproutes.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_referencegrants.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_tcproutes.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_tlsroutes.yaml"
--8<-- "content/reference/dynamic-configuration/gateway.networking.k8s.io_udproutes.yaml"
```
## Resources


@ -78,63 +78,69 @@
| `traefik/http/middlewares/Middleware10/headers/stsIncludeSubdomains` | `true` |
| `traefik/http/middlewares/Middleware10/headers/stsPreload` | `true` |
| `traefik/http/middlewares/Middleware10/headers/stsSeconds` | `42` |
| `traefik/http/middlewares/Middleware11/ipAllowList/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware11/ipAllowList/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipAllowList/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipAllowList/sourceRange/0` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipAllowList/sourceRange/1` | `foobar` |
| `traefik/http/middlewares/Middleware12/inFlightReq/amount` | `42` |
| `traefik/http/middlewares/Middleware12/inFlightReq/sourceCriterion/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware12/inFlightReq/sourceCriterion/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware12/inFlightReq/sourceCriterion/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware12/inFlightReq/sourceCriterion/requestHeaderName` | `foobar` |
| `traefik/http/middlewares/Middleware12/inFlightReq/sourceCriterion/requestHost` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/commonName` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/country` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/domainComponent` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/locality` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/organization` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/province` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/issuer/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/notAfter` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/notBefore` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/sans` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/commonName` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/country` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/domainComponent` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/locality` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/organization` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/organizationalUnit` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/province` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/info/subject/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware13/passTLSClientCert/pem` | `true` |
| `traefik/http/middlewares/Middleware14/plugin/PluginConf/foo` | `bar` |
| `traefik/http/middlewares/Middleware15/rateLimit/average` | `42` |
| `traefik/http/middlewares/Middleware15/rateLimit/burst` | `42` |
| `traefik/http/middlewares/Middleware15/rateLimit/period` | `42s` |
| `traefik/http/middlewares/Middleware15/rateLimit/sourceCriterion/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware15/rateLimit/sourceCriterion/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware15/rateLimit/sourceCriterion/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware15/rateLimit/sourceCriterion/requestHeaderName` | `foobar` |
| `traefik/http/middlewares/Middleware15/rateLimit/sourceCriterion/requestHost` | `true` |
| `traefik/http/middlewares/Middleware16/redirectRegex/permanent` | `true` |
| `traefik/http/middlewares/Middleware16/redirectRegex/regex` | `foobar` |
| `traefik/http/middlewares/Middleware16/redirectRegex/replacement` | `foobar` |
| `traefik/http/middlewares/Middleware17/redirectScheme/permanent` | `true` |
| `traefik/http/middlewares/Middleware17/redirectScheme/port` | `foobar` |
| `traefik/http/middlewares/Middleware17/redirectScheme/scheme` | `foobar` |
| `traefik/http/middlewares/Middleware18/replacePath/path` | `foobar` |
| `traefik/http/middlewares/Middleware19/replacePathRegex/regex` | `foobar` |
| `traefik/http/middlewares/Middleware19/replacePathRegex/replacement` | `foobar` |
| `traefik/http/middlewares/Middleware20/retry/attempts` | `42` |
| `traefik/http/middlewares/Middleware20/retry/initialInterval` | `42s` |
| `traefik/http/middlewares/Middleware21/stripPrefix/prefixes/0` | `foobar` |
| `traefik/http/middlewares/Middleware21/stripPrefix/prefixes/1` | `foobar` |
| `traefik/http/middlewares/Middleware22/stripPrefixRegex/regex/0` | `foobar` |
| `traefik/http/middlewares/Middleware22/stripPrefixRegex/regex/1` | `foobar` |
| `traefik/http/middlewares/Middleware23/grpcWeb/allowOrigins/0` | `foobar` |
| `traefik/http/middlewares/Middleware23/grpcWeb/allowOrigins/1` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipWhiteList/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware11/ipWhiteList/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipWhiteList/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipWhiteList/sourceRange/0` | `foobar` |
| `traefik/http/middlewares/Middleware11/ipWhiteList/sourceRange/1` | `foobar` |
| `traefik/http/middlewares/Middleware12/ipAllowList/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware12/ipAllowList/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware12/ipAllowList/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware12/ipAllowList/rejectStatusCode` | `404` |
| `traefik/http/middlewares/Middleware12/ipAllowList/sourceRange/0` | `foobar` |
| `traefik/http/middlewares/Middleware12/ipAllowList/sourceRange/1` | `foobar` |
| `traefik/http/middlewares/Middleware13/inFlightReq/amount` | `42` |
| `traefik/http/middlewares/Middleware13/inFlightReq/sourceCriterion/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware13/inFlightReq/sourceCriterion/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware13/inFlightReq/sourceCriterion/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware13/inFlightReq/sourceCriterion/requestHeaderName` | `foobar` |
| `traefik/http/middlewares/Middleware13/inFlightReq/sourceCriterion/requestHost` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/commonName` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/country` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/domainComponent` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/locality` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/organization` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/province` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/issuer/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/notAfter` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/notBefore` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/sans` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/commonName` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/country` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/domainComponent` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/locality` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/organization` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/organizationalUnit` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/province` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/info/subject/serialNumber` | `true` |
| `traefik/http/middlewares/Middleware14/passTLSClientCert/pem` | `true` |
| `traefik/http/middlewares/Middleware15/plugin/PluginConf/foo` | `bar` |
| `traefik/http/middlewares/Middleware16/rateLimit/average` | `42` |
| `traefik/http/middlewares/Middleware16/rateLimit/burst` | `42` |
| `traefik/http/middlewares/Middleware16/rateLimit/period` | `42s` |
| `traefik/http/middlewares/Middleware16/rateLimit/sourceCriterion/ipStrategy/depth` | `42` |
| `traefik/http/middlewares/Middleware16/rateLimit/sourceCriterion/ipStrategy/excludedIPs/0` | `foobar` |
| `traefik/http/middlewares/Middleware16/rateLimit/sourceCriterion/ipStrategy/excludedIPs/1` | `foobar` |
| `traefik/http/middlewares/Middleware16/rateLimit/sourceCriterion/requestHeaderName` | `foobar` |
| `traefik/http/middlewares/Middleware16/rateLimit/sourceCriterion/requestHost` | `true` |
| `traefik/http/middlewares/Middleware17/redirectRegex/permanent` | `true` |
| `traefik/http/middlewares/Middleware17/redirectRegex/regex` | `foobar` |
| `traefik/http/middlewares/Middleware17/redirectRegex/replacement` | `foobar` |
| `traefik/http/middlewares/Middleware18/redirectScheme/permanent` | `true` |
| `traefik/http/middlewares/Middleware18/redirectScheme/port` | `foobar` |
| `traefik/http/middlewares/Middleware18/redirectScheme/scheme` | `foobar` |
| `traefik/http/middlewares/Middleware19/replacePath/path` | `foobar` |
| `traefik/http/middlewares/Middleware20/replacePathRegex/regex` | `foobar` |
| `traefik/http/middlewares/Middleware20/replacePathRegex/replacement` | `foobar` |
| `traefik/http/middlewares/Middleware21/retry/attempts` | `42` |
| `traefik/http/middlewares/Middleware21/retry/initialInterval` | `42s` |
| `traefik/http/middlewares/Middleware22/stripPrefix/prefixes/0` | `foobar` |
| `traefik/http/middlewares/Middleware22/stripPrefix/prefixes/1` | `foobar` |
| `traefik/http/middlewares/Middleware23/stripPrefixRegex/regex/0` | `foobar` |
| `traefik/http/middlewares/Middleware23/stripPrefixRegex/regex/1` | `foobar` |
| `traefik/http/middlewares/Middleware24/grpcWeb/allowOrigins/0` | `foobar` |
| `traefik/http/middlewares/Middleware24/grpcWeb/allowOrigins/1` | `foobar` |
| `traefik/http/routers/Router0/entryPoints/0` | `foobar` |
| `traefik/http/routers/Router0/entryPoints/1` | `foobar` |
| `traefik/http/routers/Router0/middlewares/0` | `foobar` |
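For readability, the renumbered middleware keys above correspond to the following file-provider dynamic configuration (a sketch, not part of this diff; `foobar` placeholders as in the table):

```yaml
http:
  middlewares:
    Middleware11:
      ipWhiteList:              # deprecated, kept for compatibility
        sourceRange:
          - foobar
    Middleware12:
      ipAllowList:
        rejectStatusCode: 404   # new option, see the CRD change below
        sourceRange:
          - foobar
```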

View file

@ -589,6 +589,36 @@ spec:
description: 'IPAllowList holds the IP allowlist middleware configuration.
This middleware accepts / refuses requests based on the client IP.
More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
by Traefik to determine the client IP. More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/#ipstrategy'
properties:
depth:
description: Depth tells Traefik to use the X-Forwarded-For
header and take the IP located at the depth position (starting
from the right).
type: integer
excludedIPs:
description: ExcludedIPs configures Traefik to scan the X-Forwarded-For
header and select the first IP not in the list.
items:
type: string
type: array
type: object
rejectStatusCode:
description: RejectStatusCode defines the HTTP status code used
for refused requests. If not set, the default is 403 (Forbidden).
type: integer
sourceRange:
description: SourceRange defines the set of allowed IPs (or ranges
of allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
ipWhiteList:
description: 'Deprecated: please use IPAllowList instead.'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
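As a concrete illustration (not part of this diff), a Middleware manifest using the new `rejectStatusCode` option described above; the name and CIDR are illustrative:

```yaml
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: allow-office-ips
spec:
  ipAllowList:
    sourceRange:
      - 192.168.1.0/24      # allowed range (illustrative)
    rejectStatusCode: 404   # respond with 404 instead of the default 403
```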

View file

@ -55,6 +55,17 @@ spec:
type: string
type: array
type: object
ipWhiteList:
description: 'IPWhiteList defines the IPWhiteList middleware configuration.
Deprecated: please use IPAllowList instead.'
properties:
sourceRange:
description: SourceRange defines the allowed IPs (or ranges of
allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
type: object
required:
- metadata
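Since the TCP `ipWhiteList` field above is kept only for backward compatibility, here is a hedged sketch of the recommended `ipAllowList` replacement (name and range illustrative):

```yaml
apiVersion: traefik.io/v1alpha1
kind: MiddlewareTCP
metadata:
  name: tcp-allow-internal
spec:
  ipAllowList:
    sourceRange:
      - 10.0.0.0/8   # allowed range (illustrative)
```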

View file

@ -177,6 +177,12 @@ Trust all. (Default: ```false```)
`--entrypoints.<name>.proxyprotocol.trustedips`:
Trust only selected IPs.
`--entrypoints.<name>.transport.keepalivemaxrequests`:
Maximum number of requests before closing a keep-alive connection. (Default: ```0```)
`--entrypoints.<name>.transport.keepalivemaxtime`:
Maximum duration before closing a keep-alive connection. (Default: ```0```)
`--entrypoints.<name>.transport.lifecycle.gracetimeout`:
Duration to give active requests a chance to finish before Traefik stops. (Default: ```10```)
@ -195,9 +201,6 @@ WriteTimeout is the maximum duration before timing out writes of the response. I
`--entrypoints.<name>.udp.timeout`:
Timeout defines how long to wait on an idle session before releasing the related resources. (Default: ```3```)
`--experimental.http3`:
Enable HTTP3. (Default: ```false```)
`--experimental.kubernetesgateway`:
Allow the Kubernetes gateway api provider usage. (Default: ```false```)
@ -217,7 +220,7 @@ plugin's version.
Periodically check if a new version has been released. (Default: ```true```)
`--global.sendanonymoususage`:
Periodically send anonymous usage statistics. If the option is not specified, it will be enabled by default. (Default: ```false```)
Periodically send anonymous usage statistics. If the option is not specified, it will be disabled by default. (Default: ```false```)
`--hostresolver`:
Enable CNAME Flattening. (Default: ```false```)
@ -688,7 +691,7 @@ Kubernetes namespaces.
Ingress refresh throttle duration (Default: ```0```)
`--providers.kubernetescrd.token`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`--providers.kubernetesgateway`:
Enable Kubernetes gateway api provider with default settings. (Default: ```false```)
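The `token` change above applies to all three Kubernetes providers. A minimal sketch in file (YAML) static configuration, assuming a mounted service-account token at the (illustrative) path below:

```yaml
providers:
  kubernetesCRD:
    # Either an inline bearer token, or (new) the path to a file containing it;
    # the path below is illustrative.
    token: "/var/run/secrets/kubernetes.io/serviceaccount/token"
```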
@ -709,7 +712,7 @@ Kubernetes namespaces.
Kubernetes refresh throttle duration (Default: ```0```)
`--providers.kubernetesgateway.token`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`--providers.kubernetesingress`:
Enable Kubernetes backend with default settings. (Default: ```false```)
@ -751,7 +754,7 @@ Kubernetes namespaces.
Ingress refresh throttle duration (Default: ```0```)
`--providers.kubernetesingress.token`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`--providers.nomad`:
Enable Nomad backend with default settings. (Default: ```false```)
@ -822,6 +825,27 @@ Password for authentication.
`--providers.redis.rootkey`:
Root key used for KV store. (Default: ```traefik```)
`--providers.redis.sentinel.latencystrategy`:
Defines whether to route commands to the closest master or replica nodes (mutually exclusive with RandomStrategy and ReplicaStrategy). (Default: ```false```)
`--providers.redis.sentinel.mastername`:
Name of the master.
`--providers.redis.sentinel.password`:
Password for Sentinel authentication.
`--providers.redis.sentinel.randomstrategy`:
Defines whether to route commands randomly to master or replica nodes (mutually exclusive with LatencyStrategy and ReplicaStrategy). (Default: ```false```)
`--providers.redis.sentinel.replicastrategy`:
Defines whether to route all commands to replica nodes (mutually exclusive with LatencyStrategy and RandomStrategy). (Default: ```false```)
`--providers.redis.sentinel.usedisconnectedreplicas`:
Use replicas disconnected from the master when no connected replicas are available. (Default: ```false```)
`--providers.redis.sentinel.username`:
Username for Sentinel authentication.
`--providers.redis.tls.ca`:
TLS CA
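A sketch (not from this diff) of the new Sentinel options in file (YAML) static configuration; it assumes the provider `endpoints` are the Sentinel addresses, and every name and address below is illustrative:

```yaml
providers:
  redis:
    endpoints:
      - "sentinel-1:26379"   # assumed Sentinel addresses (illustrative)
      - "sentinel-2:26379"
    sentinel:
      masterName: "mymaster"
      username: "sentinel-user"   # optional Sentinel credentials
      password: "sentinel-pass"
```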
@ -963,170 +987,50 @@ Defines the allowed SPIFFE trust domain.
`--tracing`:
OpenTracing configuration. (Default: ```false```)
`--tracing.datadog`:
Settings for Datadog. (Default: ```false```)
`--tracing.globalattributes.<name>`:
Defines additional attributes (key:value) on all spans.
`--tracing.datadog.bagageprefixheadername`:
Sets the header name prefix used to store baggage items in a map.
`--tracing.datadog.debug`:
Enables Datadog debug. (Default: ```false```)
`--tracing.datadog.globaltags.<name>`:
Sets a list of key:value tags on all spans.
`--tracing.datadog.localagenthostport`:
Sets the Datadog Agent host:port. (Default: ```localhost:8126```)
`--tracing.datadog.localagentsocket`:
Sets the socket for the Datadog Agent.
`--tracing.datadog.parentidheadername`:
Sets the header name used to store the parent ID.
`--tracing.datadog.prioritysampling`:
Enables priority sampling. When using distributed tracing, this option must be enabled in order to get all the parts of a distributed trace sampled. (Default: ```false```)
`--tracing.datadog.samplingpriorityheadername`:
Sets the header name used to store the sampling priority.
`--tracing.datadog.traceidheadername`:
Sets the header name used to store the trace ID.
`--tracing.elastic`:
Settings for Elastic. (Default: ```false```)
`--tracing.elastic.secrettoken`:
Sets the token used to connect to Elastic APM Server.
`--tracing.elastic.serverurl`:
Sets the URL of the Elastic APM server.
`--tracing.elastic.serviceenvironment`:
Sets the name of the environment Traefik is deployed in, e.g. 'production' or 'staging'.
`--tracing.haystack`:
Settings for Haystack. (Default: ```false```)
`--tracing.haystack.baggageprefixheadername`:
Sets the header name prefix used to store baggage items in a map.
`--tracing.haystack.globaltag`:
Sets a key:value tag on all spans.
`--tracing.haystack.localagenthost`:
Sets the Haystack Agent host. (Default: ```127.0.0.1```)
`--tracing.haystack.localagentport`:
Sets the Haystack Agent port. (Default: ```35000```)
`--tracing.haystack.parentidheadername`:
Sets the header name used to store the parent ID.
`--tracing.haystack.spanidheadername`:
Sets the header name used to store the span ID.
`--tracing.haystack.traceidheadername`:
Sets the header name used to store the trace ID.
`--tracing.instana`:
Settings for Instana. (Default: ```false```)
`--tracing.instana.enableautoprofile`:
Enables automatic profiling for the Traefik process. (Default: ```false```)
`--tracing.instana.localagenthost`:
Sets the Instana Agent host.
`--tracing.instana.localagentport`:
Sets the Instana Agent port. (Default: ```42699```)
`--tracing.instana.loglevel`:
Sets the log level for the Instana tracer. ('error','warn','info','debug') (Default: ```info```)
`--tracing.jaeger`:
Settings for Jaeger. (Default: ```false```)
`--tracing.jaeger.collector.endpoint`:
Instructs reporter to send spans to jaeger-collector at this URL.
`--tracing.jaeger.collector.password`:
Password for basic http authentication when sending spans to jaeger-collector.
`--tracing.jaeger.collector.user`:
User for basic http authentication when sending spans to jaeger-collector.
`--tracing.jaeger.disableattemptreconnecting`:
Disables the periodic re-resolution of the agent's hostname and reconnection if there was a change. (Default: ```true```)
`--tracing.jaeger.gen128bit`:
Generates 128 bits span IDs. (Default: ```false```)
`--tracing.jaeger.localagenthostport`:
Sets the Jaeger Agent host:port. (Default: ```127.0.0.1:6831```)
`--tracing.jaeger.propagation`:
Sets the propagation format (jaeger/b3). (Default: ```jaeger```)
`--tracing.jaeger.samplingparam`:
Sets the sampling parameter. (Default: ```1.000000```)
`--tracing.jaeger.samplingserverurl`:
Sets the sampling server URL. (Default: ```http://localhost:5778/sampling```)
`--tracing.jaeger.samplingtype`:
Sets the sampling type. (Default: ```const```)
`--tracing.jaeger.tracecontextheadername`:
Sets the header name used to store the trace ID. (Default: ```uber-trace-id```)
`--tracing.opentelemetry`:
Settings for OpenTelemetry. (Default: ```false```)
`--tracing.opentelemetry.address`:
Sets the address (host:port) of the collector endpoint. (Default: ```localhost:4318```)
`--tracing.opentelemetry.grpc`:
gRPC specific configuration for the OpenTelemetry collector. (Default: ```true```)
`--tracing.opentelemetry.headers.<name>`:
`--tracing.headers.<name>`:
Defines additional headers to be sent with the payloads.
`--tracing.opentelemetry.insecure`:
`--tracing.otlp`:
Settings for OpenTelemetry. (Default: ```false```)
`--tracing.otlp.grpc.endpoint`:
Sets the gRPC endpoint (host:port) of the collector. (Default: ```localhost:4317```)
`--tracing.otlp.grpc.insecure`:
Disables client transport security for the exporter. (Default: ```false```)
`--tracing.opentelemetry.path`:
Sets the URL path of the collector endpoint.
`--tracing.opentelemetry.tls.ca`:
`--tracing.otlp.grpc.tls.ca`:
TLS CA
`--tracing.opentelemetry.tls.cert`:
`--tracing.otlp.grpc.tls.cert`:
TLS cert
`--tracing.opentelemetry.tls.insecureskipverify`:
`--tracing.otlp.grpc.tls.insecureskipverify`:
TLS insecure skip verify (Default: ```false```)
`--tracing.opentelemetry.tls.key`:
`--tracing.otlp.grpc.tls.key`:
TLS key
`--tracing.otlp.http.endpoint`:
Sets the HTTP endpoint (scheme://host:port/v1/traces) of the collector. (Default: ```localhost:4318```)
`--tracing.otlp.http.tls.ca`:
TLS CA
`--tracing.otlp.http.tls.cert`:
TLS cert
`--tracing.otlp.http.tls.insecureskipverify`:
TLS insecure skip verify (Default: ```false```)
`--tracing.otlp.http.tls.key`:
TLS key
`--tracing.samplerate`:
Sets the rate between 0.0 and 1.0 of requests to trace. (Default: ```1.000000```)
`--tracing.servicename`:
Set the name for this service. (Default: ```traefik```)
`--tracing.spannamelimit`:
Set the maximum character limit for Span names (default 0 = no limit). (Default: ```0```)
`--tracing.zipkin`:
Settings for Zipkin. (Default: ```false```)
`--tracing.zipkin.httpendpoint`:
Sets the HTTP Endpoint to report traces to. (Default: ```http://localhost:9411/api/v2/spans```)
`--tracing.zipkin.id128bit`:
Uses 128 bits root span IDs. (Default: ```true```)
`--tracing.zipkin.samespan`:
Uses SameSpan RPC style traces. (Default: ```false```)
`--tracing.zipkin.samplerate`:
Sets the rate between 0.0 and 1.0 of requests to trace. (Default: ```1.000000```)
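The per-backend tracer flags removed above are replaced by the OTLP options; a focused sketch (not from this diff) in file (YAML) static configuration, using the default endpoints quoted above:

```yaml
tracing:
  serviceName: traefik
  sampleRate: 1.0
  otlp:
    http:
      endpoint: localhost:4318   # default HTTP collector endpoint quoted above
    # or export over gRPC instead:
    # grpc:
    #   endpoint: localhost:4317
    #   insecure: true
```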

View file

@ -177,6 +177,12 @@ Trust all. (Default: ```false```)
`TRAEFIK_ENTRYPOINTS_<NAME>_PROXYPROTOCOL_TRUSTEDIPS`:
Trust only selected IPs.
`TRAEFIK_ENTRYPOINTS_<NAME>_TRANSPORT_KEEPALIVEMAXREQUESTS`:
Maximum number of requests before closing a keep-alive connection. (Default: ```0```)
`TRAEFIK_ENTRYPOINTS_<NAME>_TRANSPORT_KEEPALIVEMAXTIME`:
Maximum duration before closing a keep-alive connection. (Default: ```0```)
`TRAEFIK_ENTRYPOINTS_<NAME>_TRANSPORT_LIFECYCLE_GRACETIMEOUT`:
Duration to give active requests a chance to finish before Traefik stops. (Default: ```10```)
@ -195,9 +201,6 @@ WriteTimeout is the maximum duration before timing out writes of the response. I
`TRAEFIK_ENTRYPOINTS_<NAME>_UDP_TIMEOUT`:
Timeout defines how long to wait on an idle session before releasing the related resources. (Default: ```3```)
`TRAEFIK_EXPERIMENTAL_HTTP3`:
Enable HTTP3. (Default: ```false```)
`TRAEFIK_EXPERIMENTAL_KUBERNETESGATEWAY`:
Allow the Kubernetes gateway api provider usage. (Default: ```false```)
@ -217,7 +220,7 @@ plugin's version.
Periodically check if a new version has been released. (Default: ```true```)
`TRAEFIK_GLOBAL_SENDANONYMOUSUSAGE`:
Periodically send anonymous usage statistics. If the option is not specified, it will be enabled by default. (Default: ```false```)
Periodically send anonymous usage statistics. If the option is not specified, it will be disabled by default. (Default: ```false```)
`TRAEFIK_HOSTRESOLVER`:
Enable CNAME Flattening. (Default: ```false```)
@ -688,7 +691,7 @@ Kubernetes namespaces.
Ingress refresh throttle duration (Default: ```0```)
`TRAEFIK_PROVIDERS_KUBERNETESCRD_TOKEN`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`TRAEFIK_PROVIDERS_KUBERNETESGATEWAY`:
Enable Kubernetes gateway api provider with default settings. (Default: ```false```)
@ -709,7 +712,7 @@ Kubernetes namespaces.
Kubernetes refresh throttle duration (Default: ```0```)
`TRAEFIK_PROVIDERS_KUBERNETESGATEWAY_TOKEN`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`TRAEFIK_PROVIDERS_KUBERNETESINGRESS`:
Enable Kubernetes backend with default settings. (Default: ```false```)
@ -751,7 +754,7 @@ Kubernetes namespaces.
Ingress refresh throttle duration (Default: ```0```)
`TRAEFIK_PROVIDERS_KUBERNETESINGRESS_TOKEN`:
Kubernetes bearer token (not needed for in-cluster client).
Kubernetes bearer token (not needed for in-cluster client). It accepts either a token value or a file path to the token.
`TRAEFIK_PROVIDERS_NOMAD`:
Enable Nomad backend with default settings. (Default: ```false```)
@ -822,6 +825,27 @@ Password for authentication.
`TRAEFIK_PROVIDERS_REDIS_ROOTKEY`:
Root key used for KV store. (Default: ```traefik```)
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_LATENCYSTRATEGY`:
Defines whether to route commands to the closest master or replica nodes (mutually exclusive with RandomStrategy and ReplicaStrategy). (Default: ```false```)
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_MASTERNAME`:
Name of the master.
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_PASSWORD`:
Password for Sentinel authentication.
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_RANDOMSTRATEGY`:
Defines whether to route commands randomly to master or replica nodes (mutually exclusive with LatencyStrategy and ReplicaStrategy). (Default: ```false```)
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_REPLICASTRATEGY`:
Defines whether to route all commands to replica nodes (mutually exclusive with LatencyStrategy and RandomStrategy). (Default: ```false```)
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_USEDISCONNECTEDREPLICAS`:
Use replicas disconnected from the master when no connected replicas are available. (Default: ```false```)
`TRAEFIK_PROVIDERS_REDIS_SENTINEL_USERNAME`:
Username for Sentinel authentication.
`TRAEFIK_PROVIDERS_REDIS_TLS_CA`:
TLS CA
@ -963,170 +987,50 @@ Defines the allowed SPIFFE trust domain.
`TRAEFIK_TRACING`:
OpenTracing configuration. (Default: ```false```)
`TRAEFIK_TRACING_DATADOG`:
Settings for Datadog. (Default: ```false```)
`TRAEFIK_TRACING_GLOBALATTRIBUTES_<NAME>`:
Defines additional attributes (key:value) on all spans.
`TRAEFIK_TRACING_DATADOG_BAGAGEPREFIXHEADERNAME`:
Sets the header name prefix used to store baggage items in a map.
`TRAEFIK_TRACING_DATADOG_DEBUG`:
Enables Datadog debug. (Default: ```false```)
`TRAEFIK_TRACING_DATADOG_GLOBALTAGS_<NAME>`:
Sets a list of key:value tags on all spans.
`TRAEFIK_TRACING_DATADOG_LOCALAGENTHOSTPORT`:
Sets the Datadog Agent host:port. (Default: ```localhost:8126```)
`TRAEFIK_TRACING_DATADOG_LOCALAGENTSOCKET`:
Sets the socket for the Datadog Agent.
`TRAEFIK_TRACING_DATADOG_PARENTIDHEADERNAME`:
Sets the header name used to store the parent ID.
`TRAEFIK_TRACING_DATADOG_PRIORITYSAMPLING`:
Enables priority sampling. When using distributed tracing, this option must be enabled in order to get all the parts of a distributed trace sampled. (Default: ```false```)
`TRAEFIK_TRACING_DATADOG_SAMPLINGPRIORITYHEADERNAME`:
Sets the header name used to store the sampling priority.
`TRAEFIK_TRACING_DATADOG_TRACEIDHEADERNAME`:
Sets the header name used to store the trace ID.
`TRAEFIK_TRACING_ELASTIC`:
Settings for Elastic. (Default: ```false```)
`TRAEFIK_TRACING_ELASTIC_SECRETTOKEN`:
Sets the token used to connect to Elastic APM Server.
`TRAEFIK_TRACING_ELASTIC_SERVERURL`:
Sets the URL of the Elastic APM server.
`TRAEFIK_TRACING_ELASTIC_SERVICEENVIRONMENT`:
Sets the name of the environment Traefik is deployed in, e.g. 'production' or 'staging'.
`TRAEFIK_TRACING_HAYSTACK`:
Settings for Haystack. (Default: ```false```)
`TRAEFIK_TRACING_HAYSTACK_BAGGAGEPREFIXHEADERNAME`:
Sets the header name prefix used to store baggage items in a map.
`TRAEFIK_TRACING_HAYSTACK_GLOBALTAG`:
Sets a key:value tag on all spans.
`TRAEFIK_TRACING_HAYSTACK_LOCALAGENTHOST`:
Sets the Haystack Agent host. (Default: ```127.0.0.1```)
`TRAEFIK_TRACING_HAYSTACK_LOCALAGENTPORT`:
Sets the Haystack Agent port. (Default: ```35000```)
`TRAEFIK_TRACING_HAYSTACK_PARENTIDHEADERNAME`:
Sets the header name used to store the parent ID.
`TRAEFIK_TRACING_HAYSTACK_SPANIDHEADERNAME`:
Sets the header name used to store the span ID.
`TRAEFIK_TRACING_HAYSTACK_TRACEIDHEADERNAME`:
Sets the header name used to store the trace ID.
`TRAEFIK_TRACING_INSTANA`:
Settings for Instana. (Default: ```false```)
`TRAEFIK_TRACING_INSTANA_ENABLEAUTOPROFILE`:
Enables automatic profiling for the Traefik process. (Default: ```false```)
`TRAEFIK_TRACING_INSTANA_LOCALAGENTHOST`:
Sets the Instana Agent host.
`TRAEFIK_TRACING_INSTANA_LOCALAGENTPORT`:
Sets the Instana Agent port. (Default: ```42699```)
`TRAEFIK_TRACING_INSTANA_LOGLEVEL`:
Sets the log level for the Instana tracer. ('error','warn','info','debug') (Default: ```info```)
`TRAEFIK_TRACING_JAEGER`:
Settings for Jaeger. (Default: ```false```)
`TRAEFIK_TRACING_JAEGER_COLLECTOR_ENDPOINT`:
Instructs reporter to send spans to jaeger-collector at this URL.
`TRAEFIK_TRACING_JAEGER_COLLECTOR_PASSWORD`:
Password for basic http authentication when sending spans to jaeger-collector.
`TRAEFIK_TRACING_JAEGER_COLLECTOR_USER`:
User for basic http authentication when sending spans to jaeger-collector.
`TRAEFIK_TRACING_JAEGER_DISABLEATTEMPTRECONNECTING`:
Disables the periodic re-resolution of the agent's hostname and reconnection if there was a change. (Default: ```true```)
`TRAEFIK_TRACING_JAEGER_GEN128BIT`:
Generates 128 bits span IDs. (Default: ```false```)
`TRAEFIK_TRACING_JAEGER_LOCALAGENTHOSTPORT`:
Sets the Jaeger Agent host:port. (Default: ```127.0.0.1:6831```)
`TRAEFIK_TRACING_JAEGER_PROPAGATION`:
Sets the propagation format (jaeger/b3). (Default: ```jaeger```)
`TRAEFIK_TRACING_JAEGER_SAMPLINGPARAM`:
Sets the sampling parameter. (Default: ```1.000000```)
`TRAEFIK_TRACING_JAEGER_SAMPLINGSERVERURL`:
Sets the sampling server URL. (Default: ```http://localhost:5778/sampling```)
`TRAEFIK_TRACING_JAEGER_SAMPLINGTYPE`:
Sets the sampling type. (Default: ```const```)
`TRAEFIK_TRACING_JAEGER_TRACECONTEXTHEADERNAME`:
Sets the header name used to store the trace ID. (Default: ```uber-trace-id```)
`TRAEFIK_TRACING_OPENTELEMETRY`:
Settings for OpenTelemetry. (Default: ```false```)
`TRAEFIK_TRACING_OPENTELEMETRY_ADDRESS`:
Sets the address (host:port) of the collector endpoint. (Default: ```localhost:4318```)
`TRAEFIK_TRACING_OPENTELEMETRY_GRPC`:
gRPC specific configuration for the OpenTelemetry collector. (Default: ```true```)
`TRAEFIK_TRACING_OPENTELEMETRY_HEADERS_<NAME>`:
`TRAEFIK_TRACING_HEADERS_<NAME>`:
Defines additional headers to be sent with the payloads.
`TRAEFIK_TRACING_OPENTELEMETRY_INSECURE`:
`TRAEFIK_TRACING_OTLP`:
Settings for OpenTelemetry. (Default: ```false```)
`TRAEFIK_TRACING_OTLP_GRPC_ENDPOINT`:
Sets the gRPC endpoint (host:port) of the collector. (Default: ```localhost:4317```)
`TRAEFIK_TRACING_OTLP_GRPC_INSECURE`:
Disables client transport security for the exporter. (Default: ```false```)
`TRAEFIK_TRACING_OPENTELEMETRY_PATH`:
Sets the URL path of the collector endpoint.
`TRAEFIK_TRACING_OPENTELEMETRY_TLS_CA`:
`TRAEFIK_TRACING_OTLP_GRPC_TLS_CA`:
TLS CA
`TRAEFIK_TRACING_OPENTELEMETRY_TLS_CERT`:
`TRAEFIK_TRACING_OTLP_GRPC_TLS_CERT`:
TLS cert
`TRAEFIK_TRACING_OPENTELEMETRY_TLS_INSECURESKIPVERIFY`:
`TRAEFIK_TRACING_OTLP_GRPC_TLS_INSECURESKIPVERIFY`:
TLS insecure skip verify (Default: ```false```)
`TRAEFIK_TRACING_OPENTELEMETRY_TLS_KEY`:
`TRAEFIK_TRACING_OTLP_GRPC_TLS_KEY`:
TLS key
`TRAEFIK_TRACING_OTLP_HTTP_ENDPOINT`:
Sets the HTTP endpoint (scheme://host:port/v1/traces) of the collector. (Default: ```localhost:4318```)
`TRAEFIK_TRACING_OTLP_HTTP_TLS_CA`:
TLS CA
`TRAEFIK_TRACING_OTLP_HTTP_TLS_CERT`:
TLS cert
`TRAEFIK_TRACING_OTLP_HTTP_TLS_INSECURESKIPVERIFY`:
TLS insecure skip verify (Default: ```false```)
`TRAEFIK_TRACING_OTLP_HTTP_TLS_KEY`:
TLS key
`TRAEFIK_TRACING_SAMPLERATE`:
Sets the rate between 0.0 and 1.0 of requests to trace. (Default: ```1.000000```)
`TRAEFIK_TRACING_SERVICENAME`:
Set the name for this service. (Default: ```traefik```)
`TRAEFIK_TRACING_SPANNAMELIMIT`:
Set the maximum character limit for Span names (default 0 = no limit). (Default: ```0```)
`TRAEFIK_TRACING_ZIPKIN`:
Settings for Zipkin. (Default: ```false```)
`TRAEFIK_TRACING_ZIPKIN_HTTPENDPOINT`:
Sets the HTTP Endpoint to report traces to. (Default: ```http://localhost:9411/api/v2/spans```)
`TRAEFIK_TRACING_ZIPKIN_ID128BIT`:
Uses 128 bits root span IDs. (Default: ```true```)
`TRAEFIK_TRACING_ZIPKIN_SAMESPAN`:
Uses SameSpan RPC style traces. (Default: ```false```)
`TRAEFIK_TRACING_ZIPKIN_SAMPLERATE`:
Sets the rate between 0.0 and 1.0 of requests to trace. (Default: ```1.000000```)

View file

@ -35,6 +35,8 @@
address = "foobar"
asDefault = true
[entryPoints.EntryPoint0.transport]
keepAliveMaxRequests = 42
keepAliveMaxTime = "42s"
[entryPoints.EntryPoint0.transport.lifeCycle]
requestAcceptGraceTimeout = "42s"
graceTimeOut = "42s"
@ -242,6 +244,14 @@
cert = "foobar"
key = "foobar"
insecureSkipVerify = true
[providers.redis.sentinel]
masterName = "foobar"
username = "foobar"
password = "foobar"
latencyStrategy = true
randomStrategy = true
replicaStrategy = true
useDisconnectedReplicas = true
[providers.http]
endpoint = "foobar"
pollInterval = "42s"
@ -357,68 +367,28 @@
[tracing]
serviceName = "foobar"
spanNameLimit = 42
[tracing.jaeger]
samplingServerURL = "foobar"
samplingType = "foobar"
samplingParam = 42.0
localAgentHostPort = "foobar"
gen128Bit = true
propagation = "foobar"
traceContextHeaderName = "foobar"
disableAttemptReconnecting = true
[tracing.jaeger.collector]
sampleRate = 42
[tracing.headers]
header1 = "foobar"
header2 = "foobar"
[tracing.globalAttributes]
attr1 = "foobar"
attr2 = "foobar"
[tracing.otlp.grpc]
endpoint = "foobar"
user = "foobar"
password = "foobar"
[tracing.zipkin]
httpEndpoint = "foobar"
sameSpan = true
id128Bit = true
sampleRate = 42.0
[tracing.datadog]
localAgentHostPort = "foobar"
localAgentSocket = "foobar"
[tracing.datadog.globalTags]
tag1 = "foobar"
tag2 = "foobar"
debug = true
prioritySampling = true
traceIDHeaderName = "foobar"
parentIDHeaderName = "foobar"
samplingPriorityHeaderName = "foobar"
bagagePrefixHeaderName = "foobar"
[tracing.instana]
localAgentHost = "foobar"
localAgentPort = 42
logLevel = "foobar"
enableAutoProfile = true
[tracing.haystack]
localAgentHost = "foobar"
localAgentPort = 42
globalTag = "foobar"
traceIDHeaderName = "foobar"
parentIDHeaderName = "foobar"
spanIDHeaderName = "foobar"
baggagePrefixHeaderName = "foobar"
[tracing.elastic]
serverURL = "foobar"
secretToken = "foobar"
serviceEnvironment = "foobar"
[tracing.openTelemetry]
address = "foobar"
insecure = true
path = "foobar"
[tracing.openTelemetry.headers]
name0 = "foobar"
name1 = "foobar"
[tracing.openTelemetry.tls]
[tracing.otlp.grpc.tls]
ca = "foobar"
cert = "foobar"
key = "foobar"
insecureSkipVerify = true
[tracing.otlp.http]
endpoint = "foobar"
[tracing.otlp.http.tls]
ca = "foobar"
caOptional = true
cert = "foobar"
key = "foobar"
insecureSkipVerify = true
[tracing.openTelemetry.grpc]
[hostResolver]
cnameFlattening = true
@ -449,7 +419,6 @@
[experimental]
kubernetesGateway = true
http3 = true
[experimental.plugins]
[experimental.plugins.Descriptor0]
moduleName = "foobar"

View file

@ -36,6 +36,8 @@ entryPoints:
address: foobar
asDefault: true
transport:
keepAliveMaxRequests: 42
keepAliveMaxTime: 42s
lifeCycle:
requestAcceptGraceTimeout: 42s
graceTimeOut: 42s
@ -271,6 +273,14 @@ providers:
cert: foobar
key: foobar
insecureSkipVerify: true
sentinel:
masterName: foobar
username: foobar
password: foobar
latencyStrategy: true
randomStrategy: true
replicaStrategy: true
useDisconnectedReplicas: true
http:
endpoint: foobar
pollInterval: 42s
@ -387,68 +397,29 @@ accessLog:
bufferingSize: 42
tracing:
serviceName: foobar
spanNameLimit: 42
jaeger:
samplingServerURL: foobar
samplingType: foobar
samplingParam: 42
localAgentHostPort: foobar
gen128Bit: true
propagation: foobar
traceContextHeaderName: foobar
disableAttemptReconnecting: true
collector:
endpoint: foobar
user: foobar
password: foobar
zipkin:
httpEndpoint: foobar
sameSpan: true
id128Bit: true
sampleRate: 42
datadog:
localAgentHostPort: foobar
localAgentSocket: foobar
globalTags:
tag1: foobar
tag2: foobar
debug: true
prioritySampling: true
traceIDHeaderName: foobar
parentIDHeaderName: foobar
samplingPriorityHeaderName: foobar
bagagePrefixHeaderName: foobar
instana:
localAgentHost: foobar
localAgentPort: 42
logLevel: foobar
enableAutoProfile: true
haystack:
localAgentHost: foobar
localAgentPort: 42
globalTag: foobar
traceIDHeaderName: foobar
parentIDHeaderName: foobar
spanIDHeaderName: foobar
baggagePrefixHeaderName: foobar
elastic:
serverURL: foobar
secretToken: foobar
serviceEnvironment: foobar
openTelemetry:
address: foobar
headers:
name0: foobar
name1: foobar
header1: foobar
header2: foobar
globalAttributes:
attr1: foobar
attr2: foobar
otlp:
grpc:
endpoint: foobar
insecure: true
path: foobar
tls:
ca: foobar
caOptional: true
cert: foobar
key: foobar
insecureSkipVerify: true
grpc: {}
http:
endpoint: foobar
tls:
ca: foobar
cert: foobar
key: foobar
insecureSkipVerify: true
hostResolver:
cnameFlattening: true
resolvConfig: foobar
@ -479,7 +450,6 @@ certificatesResolvers:
tailscale: {}
experimental:
kubernetesGateway: true
http3: true
plugins:
Descriptor0:
moduleName: foobar

View file

@ -623,17 +623,77 @@ Controls the behavior of Traefik during the shutdown phase.
--entryPoints.name.transport.lifeCycle.graceTimeOut=42
```
#### `keepAliveMaxRequests`
_Optional, Default=0_
The maximum number of requests Traefik can handle before sending a `Connection: Close` header to the client (for HTTP2, Traefik sends a GOAWAY). Zero means no limit.
```yaml tab="File (YAML)"
## Static configuration
entryPoints:
name:
address: ":8888"
transport:
keepAliveMaxRequests: 42
```
```toml tab="File (TOML)"
## Static configuration
[entryPoints]
[entryPoints.name]
address = ":8888"
[entryPoints.name.transport]
keepAliveMaxRequests = 42
```
```bash tab="CLI"
## Static configuration
--entryPoints.name.address=:8888
--entryPoints.name.transport.keepAliveMaxRequests=42
```
#### `keepAliveMaxTime`
_Optional, Default=0s_
The maximum duration Traefik can handle requests before sending a `Connection: Close` header to the client (for HTTP2, Traefik sends a GOAWAY). Zero means no limit.
```yaml tab="File (YAML)"
## Static configuration
entryPoints:
name:
address: ":8888"
transport:
keepAliveMaxTime: 42s
```
```toml tab="File (TOML)"
## Static configuration
[entryPoints]
[entryPoints.name]
address = ":8888"
[entryPoints.name.transport]
keepAliveMaxTime = "42s"
```
```bash tab="CLI"
## Static configuration
--entryPoints.name.address=:8888
--entryPoints.name.transport.keepAliveMaxTime=42s
```
### ProxyProtocol
Traefik supports [ProxyProtocol](https://www.haproxy.org/download/2.0/doc/proxy-protocol.txt) version 1 and 2.
Traefik supports [PROXY protocol](https://www.haproxy.org/download/2.0/doc/proxy-protocol.txt) version 1 and 2.
If Proxy Protocol header parsing is enabled for the entry point, this entry point can accept connections with or without Proxy Protocol headers.
If PROXY protocol header parsing is enabled for the entry point, this entry point can accept connections with or without PROXY protocol headers.
If the Proxy Protocol header is passed, then the version is determined automatically.
If the PROXY protocol header is passed, then the version is determined automatically.
??? info "`proxyProtocol.trustedIPs`"
Enabling Proxy Protocol with Trusted IPs.
Enabling PROXY protocol with Trusted IPs.
```yaml tab="File (YAML)"
## Static configuration
@ -696,7 +756,7 @@ If the Proxy Protocol header is passed, then the version is determined automatic
!!! warning "Queuing Traefik behind Another Load Balancer"
When queuing Traefik behind another load-balancer, make sure to configure Proxy Protocol on both sides.
When queuing Traefik behind another load-balancer, make sure to configure PROXY protocol on both sides.
Not doing so could introduce a security risk in your system (enabling request forgery).
## HTTP Options

View file

@ -40,18 +40,18 @@ The Kubernetes Gateway API, The Experimental Way. {: .subtitle }
You can find an excerpt of the supported Kubernetes Gateway API resources in the table below:
| Kind | Purpose | Concept Behind |
|------------------------------------|---------------------------------------------------------------------------|---------------------------------------------------------------------------------|
| [GatewayClass](#kind-gatewayclass) | Defines a set of Gateways that share a common configuration and behaviour | [GatewayClass](https://gateway-api.sigs.k8s.io/v1alpha2/api-types/gatewayclass) |
| [Gateway](#kind-gateway) | Describes how traffic can be translated to Services within the cluster | [Gateway](https://gateway-api.sigs.k8s.io/v1alpha2/api-types/gateway) |
| [HTTPRoute](#kind-httproute) | HTTP rules for mapping requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/v1alpha2/api-types/httproute) |
| [TCPRoute](#kind-tcproute) | Allows mapping TCP requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/v1alpha2/guides/tcp/) |
| [TLSRoute](#kind-tlsroute) | Allows mapping TLS requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/v1alpha2/guides/tls/) |
|------------------------------------|---------------------------------------------------------------------------|------------------------------------------------------------------------|
| [GatewayClass](#kind-gatewayclass) | Defines a set of Gateways that share a common configuration and behaviour | [GatewayClass](https://gateway-api.sigs.k8s.io/api-types/gatewayclass) |
| [Gateway](#kind-gateway) | Describes how traffic can be translated to Services within the cluster | [Gateway](https://gateway-api.sigs.k8s.io/api-types/gateway) |
| [HTTPRoute](#kind-httproute) | HTTP rules for mapping requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/api-types/httproute) |
| [TCPRoute](#kind-tcproute) | Allows mapping TCP requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/guides/tcp/) |
| [TLSRoute](#kind-tlsroute) | Allows mapping TLS requests from a Gateway to Kubernetes Services | [Route](https://gateway-api.sigs.k8s.io/guides/tls/) |
### Kind: `GatewayClass`
`GatewayClass` is a cluster-scoped resource defined by the infrastructure provider. This resource represents a class of
Gateways that can be instantiated. More details on the
GatewayClass [official documentation](https://gateway-api.sigs.k8s.io/v1alpha2/api-types/gatewayclass/).
GatewayClass [official documentation](https://gateway-api.sigs.k8s.io/api-types/gatewayclass/).
The `GatewayClass` should be declared by the infrastructure provider, otherwise please register the `GatewayClass`
[definition](../../reference/dynamic-configuration/kubernetes-gateway.md#definitions) in the Kubernetes cluster before
@ -60,7 +60,7 @@ creating `GatewayClass` objects.
!!! info "Declaring GatewayClass"
```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: GatewayClass
metadata:
name: my-gateway-class
@ -92,7 +92,7 @@ Depending on the Listener Protocol, different modes and Route types are supporte
!!! info "Declaring Gateway"
```yaml tab="HTTP Listener"
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-http-gateway
@ -114,7 +114,7 @@ Depending on the Listener Protocol, different modes and Route types are supporte
```
```yaml tab="HTTPS Listener"
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-https-gateway
@ -140,7 +140,7 @@ Depending on the Listener Protocol, different modes and Route types are supporte
```
```yaml tab="TCP Listener"
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-tcp-gateway
@ -162,7 +162,7 @@ Depending on the Listener Protocol, different modes and Route types are supporte
```
```yaml tab="TLS Listener"
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
name: my-tls-gateway
@ -213,7 +213,7 @@ Kubernetes cluster before creating `HTTPRoute` objects.
!!! info "Declaring HTTPRoute"
```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
name: http-app
@ -274,7 +274,7 @@ Kubernetes cluster before creating `TCPRoute` objects.
!!! info "Declaring TCPRoute"
```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: TCPRoute
metadata:
name: tcp-app
@ -318,7 +318,7 @@ Kubernetes cluster before creating `TLSRoute` objects.
!!! info "Declaring TLSRoute"
```yaml
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
kind: TLSRoute
metadata:
name: tls-app

View file

@ -3,9 +3,9 @@ title: "Traefik Docker DNS Challenge Documentation"
description: "Learn how to create a certificate with the Let's Encrypt DNS challenge to use HTTPS on a Service exposed with Traefik Proxy. Read the tehnical documentation."
---
# Docker-compose with let's encrypt: DNS Challenge
# Docker-compose with Let's Encrypt: DNS Challenge
This guide aim to demonstrate how to create a certificate with the let's encrypt DNS challenge to use https on a simple service exposed with Traefik.
This guide aims to demonstrate how to create a certificate with the Let's Encrypt DNS challenge to use HTTPS on a simple service exposed with Traefik.
Please also read the [basic example](../basic-example) for details on how to expose such a service.
## Prerequisite
@ -52,7 +52,7 @@ For the DNS challenge, you'll need:
!!! Note
If you uncommented the `acme.caserver` line, you will get an SSL error, but if you display the certificate and see it was emitted by `Fake LE Intermediate X1` then it means all is good.
(It is the staging environment intermediate certificate used by let's encrypt).
(It is the staging environment intermediate certificate used by Let's Encrypt).
You can now safely comment the `acme.caserver` line, remove the `letsencrypt/acme.json` file and restart Traefik to issue a valid certificate.
## Explanation
@ -69,7 +69,7 @@ ports:
- "443:443"
```
- We configure the DNS let's encrypt challenge:
- We configure the DNS Let's Encrypt challenge:
```yaml
command:
@ -77,7 +77,7 @@ command:
- "--certificatesresolvers.myresolver.acme.dnschallenge=true"
# Tell which provider to use
- "--certificatesresolvers.myresolver.acme.dnschallenge.provider=ovh"
# The email to provide to let's encrypt
# The email to provide to Let's Encrypt
- "--certificatesresolvers.myresolver.acme.email=postmaster@example.com"
```
@ -175,7 +175,7 @@ services:
- "ovh_consumer_key"
```
- The environment variable within our `whoami` service are suffixed by `_FILE` which allow us to point to files containing the value, instead of exposing the value itself.
The environment variables within our `traefik` service are suffixed by `_FILE`, which allows us to point to files containing the value, instead of exposing the value itself.
The ACME client will read the content of those files to get the required configuration values.
```yaml

View file

@ -3,9 +3,9 @@ title: "Traefik Docker HTTP Challenge Documentation"
description: "Learn how to create a certificate with the Let's Encrypt HTTP challenge to use HTTPS on a Service exposed with Traefik Proxy. Read the technical documentation."
---
# Docker-compose with let's encrypt : HTTP Challenge
# Docker-compose with Let's Encrypt: HTTP Challenge
This guide aim to demonstrate how to create a certificate with the let's encrypt HTTP challenge to use https on a simple service exposed with Traefik.
This guide aims to demonstrate how to create a certificate with the Let's Encrypt HTTP challenge to use HTTPS on a simple service exposed with Traefik.
Please also read the [basic example](../basic-example) for details on how to expose such a service.
## Prerequisite
@ -38,7 +38,7 @@ For the HTTP challenge you will need:
!!! Note
If you uncommented the `acme.caserver` line, you will get an SSL error, but if you display the certificate and see it was emitted by `Fake LE Intermediate X1` then it means all is good.
(It is the staging environment intermediate certificate used by let's encrypt).
(It is the staging environment intermediate certificate used by Let's Encrypt).
You can now safely comment the `acme.caserver` line, remove the `letsencrypt/acme.json` file and restart Traefik to issue a valid certificate.
## Explanation
@ -55,7 +55,7 @@ ports:
- "443:443"
```
- We configure the HTTPS let's encrypt challenge:
- We configure the HTTPS Let's Encrypt challenge:
```yaml
command:
@ -63,7 +63,7 @@ command:
- "--certificatesresolvers.myresolver.acme.httpchallenge=true"
# Tell it to use our predefined entrypoint named "web"
- "--certificatesresolvers.myresolver.acme.httpchallenge.entrypoint=web"
# The email to provide to let's encrypt
# The email to provide to Let's Encrypt
- "--certificatesresolvers.myresolver.acme.email=postmaster@example.com"
```

View file

@ -3,9 +3,9 @@ title: "Traefik Docker TLS Challenge Documentation"
description: "Learn how to create a certificate with the Let's Encrypt TLS challenge to use HTTPS on a service exposed with Traefik Proxy. Read the technical documentation."
---
# Docker-compose with let's encrypt: TLS Challenge
# Docker-compose with Let's Encrypt: TLS Challenge
This guide aim to demonstrate how to create a certificate with the let's encrypt TLS challenge to use https on a simple service exposed with Traefik.
This guide aims to demonstrate how to create a certificate with the Let's Encrypt TLS challenge to use HTTPS on a simple service exposed with Traefik.
Please also read the [basic example](../basic-example) for details on how to expose such a service.
## Prerequisite
@ -38,7 +38,7 @@ For the TLS challenge you will need:
!!! Note
If you uncommented the `acme.caserver` line, you will get an SSL error, but if you display the certificate and see it was emitted by `Fake LE Intermediate X1` then it means all is good.
(It is the staging environment intermediate certificate used by let's encrypt).
(It is the staging environment intermediate certificate used by Let's Encrypt).
You can now safely comment the `acme.caserver` line, remove the `letsencrypt/acme.json` file and restart Traefik to issue a valid certificate.
## Explanation
@ -55,7 +55,7 @@ ports:
- "443:443"
```
- We configure the Https let's encrypt challenge:
- We configure the TLS Let's Encrypt challenge:
```yaml
command:

View file

@ -1,16 +1,15 @@
---
title: "Traefik Docker Documentation"
description: "This guide covers a Docker Compose file exposing a service using the Docker provider in Traefik Proxy. Read the technical documentation."
description: "Learn how to use Docker Compose to expose a service with Traefik Proxy."
---
# Docker Compose example
In this section, we quickly go over a Docker Compose file exposing a service using the Docker provider.
This will also be used as a starting point for the other Docker Compose guides.
In this section, you will learn how to use [Docker Compose](https://docs.docker.com/compose/ "Link to Docker Compose") to expose a service using the Docker provider.
## Setup
- Edit a `docker-compose.yml` file with the following content:
Create a `docker-compose.yml` file with the following content:
```yaml
--8<-- "content/user-guides/docker-compose/basic-example/docker-compose.yml"
@ -45,10 +44,19 @@ This will also be used as a starting point for the other Docker Compose guides.
```
- Replace `whoami.localhost` by your **own domain** within the `traefik.http.routers.whoami.rule` label of the `whoami` service.
- Run `docker-compose up -d` within the folder where you created the previous file.
- Wait a bit and visit `http://your_own_domain` to confirm everything went fine.
You should see the output of the whoami service. Something similar to:
Replace `whoami.localhost` by your **own domain** within the `traefik.http.routers.whoami.rule` label of the `whoami` service.
Now run `docker-compose up -d` within the folder where you created the previous file.
This will start Docker Compose in background mode.
!!! info "This can take a moment"
Docker Compose will now create and start the services declared in the `docker-compose.yml`.
Wait a bit and visit `http://your_own_domain` to confirm everything went fine.
You should see the output of the whoami service.
It should be similar to the following example:
```text
Hostname: d7f919e54651
@ -69,9 +77,11 @@ This will also be used as a starting point for the other Docker Compose guides.
## Details
- As an example, we use [whoami](https://github.com/traefik/whoami "Link to the GitHub repo of whoami") (a tiny Go server that prints OS information and HTTP request to output) which was used to define our `simple-service` container.
Let's break it down and go through it, step-by-step.
- We define an entry point, along with the exposure of the matching port within Docker Compose, which allow us to "open and accept" HTTP traffic:
First, you use [whoami](https://github.com/traefik/whoami "Link to the GitHub repo of whoami"), a tiny Go server that prints OS information and HTTP requests to output, as the service container.
Second, you define an entry point, along with the exposure of the matching port within Docker Compose, which allows you to "open and accept" HTTP traffic:
```yaml
command:
@ -82,7 +92,7 @@ ports:
- "80:80"
```
- We expose the Traefik API to be able to check the configuration if needed:
Third, you expose the Traefik API to be able to check the configuration if needed:
```yaml
command:
@ -101,7 +111,7 @@ ports:
curl -s 127.0.0.1:8080/api/rawdata | jq .
```
- We allow Traefik to gather configuration from Docker:
Fourth, you allow Traefik to gather configuration from Docker:
```yaml
traefik:

View file

@ -27,7 +27,7 @@ theme:
prev: 'Previous'
next: 'Next'
copyright: 'Traefik Labs • Copyright &copy; 2016-2023'
copyright: 'Traefik Labs • Copyright &copy; 2016-2024'
extra_javascript:
- assets/js/hljs/highlight.pack.js # Download from https://highlightjs.org/download/ and enable YAML, TOML and Dockerfile
@ -125,7 +125,8 @@ nav:
- 'ForwardAuth': 'middlewares/http/forwardauth.md'
- 'GrpcWeb': 'middlewares/http/grpcweb.md'
- 'Headers': 'middlewares/http/headers.md'
- 'IpAllowList': 'middlewares/http/ipallowlist.md'
- 'IPWhiteList': 'middlewares/http/ipwhitelist.md'
- 'IPAllowList': 'middlewares/http/ipallowlist.md'
- 'InFlightReq': 'middlewares/http/inflightreq.md'
- 'PassTLSClientCert': 'middlewares/http/passtlsclientcert.md'
- 'RateLimit': 'middlewares/http/ratelimit.md'
@ -139,7 +140,8 @@ nav:
- 'TCP':
- 'Overview': 'middlewares/tcp/overview.md'
- 'InFlightConn': 'middlewares/tcp/inflightconn.md'
- 'IpAllowList': 'middlewares/tcp/ipallowlist.md'
- 'IPWhiteList': 'middlewares/tcp/ipwhitelist.md'
- 'IPAllowList': 'middlewares/tcp/ipallowlist.md'
- 'Plugins & Plugin Catalog': 'plugins/index.md'
- 'Operations':
- 'CLI': 'operations/cli.md'
@ -151,19 +153,9 @@ nav:
- 'Access Logs': 'observability/access-logs.md'
- 'Metrics':
- 'Overview': 'observability/metrics/overview.md'
- 'Datadog': 'observability/metrics/datadog.md'
- 'InfluxDB2': 'observability/metrics/influxdb2.md'
- 'OpenTelemetry': 'observability/metrics/opentelemetry.md'
- 'Prometheus': 'observability/metrics/prometheus.md'
- 'StatsD': 'observability/metrics/statsd.md'
- 'Tracing':
- 'Overview': 'observability/tracing/overview.md'
- 'Jaeger': 'observability/tracing/jaeger.md'
- 'Zipkin': 'observability/tracing/zipkin.md'
- 'Datadog': 'observability/tracing/datadog.md'
- 'Instana': 'observability/tracing/instana.md'
- 'Haystack': 'observability/tracing/haystack.md'
- 'Elastic': 'observability/tracing/elastic.md'
- 'OpenTelemetry': 'observability/tracing/opentelemetry.md'
- 'User Guides':
- 'Kubernetes and Let''s Encrypt': 'user-guides/crd-acme/index.md'

258
go.mod
View file

@ -4,23 +4,20 @@ go 1.21
require (
github.com/BurntSushi/toml v1.3.2
github.com/ExpediaDotCom/haystack-client-go v0.0.0-20190315171017-e7edbdf53a61
github.com/Masterminds/sprig/v3 v3.2.3
github.com/abbot/go-http-auth v0.0.0-00010101000000-000000000000
github.com/andybalholm/brotli v1.0.6
github.com/aws/aws-sdk-go v1.44.327
github.com/cenkalti/backoff/v4 v4.2.1
github.com/compose-spec/compose-go v1.0.3
github.com/containous/alice v0.0.0-20181107144136-d83ebdd94cbd
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/docker/cli v20.10.11+incompatible
github.com/docker/compose/v2 v2.0.1
github.com/docker/docker v20.10.21+incompatible
github.com/docker/cli v24.0.7+incompatible
github.com/docker/docker v24.0.7+incompatible
github.com/docker/go-connections v0.4.0
github.com/fatih/structs v1.1.0
github.com/fsnotify/fsnotify v1.7.0
github.com/go-acme/lego/v4 v4.14.0
github.com/go-check/check v0.0.0-00010101000000-000000000000
github.com/go-kit/kit v0.10.1-0.20200915143503-439c4d2ed3ea
github.com/golang/protobuf v1.5.3
github.com/google/go-github/v28 v28.1.1
@ -35,11 +32,10 @@ require (
github.com/http-wasm/http-wasm-host-go v0.5.2
github.com/influxdata/influxdb-client-go/v2 v2.7.0
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d
github.com/instana/go-sensor v1.38.3
github.com/klauspost/compress v1.17.1
github.com/klauspost/compress v1.17.2
github.com/kvtools/consul v1.0.2
github.com/kvtools/etcdv3 v1.0.2
github.com/kvtools/redis v1.0.2
github.com/kvtools/redis v1.1.0
github.com/kvtools/valkeyrie v1.0.0
github.com/kvtools/zookeeper v1.0.2
github.com/mailgun/ttlmap v0.0.0-20170619185759-c1c17f74874f
@ -48,70 +44,61 @@ require (
github.com/mitchellh/hashstructure v1.0.0
github.com/mitchellh/mapstructure v1.5.0
github.com/natefinch/lumberjack v0.0.0-20201021141957-47ffae23317c
github.com/opentracing/opentracing-go v1.2.0
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5
github.com/openzipkin/zipkin-go v0.2.2
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pires/go-proxyproto v0.6.1
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
github.com/prometheus/client_golang v1.14.0
github.com/prometheus/client_model v0.3.0
github.com/quic-go/quic-go v0.39.1
github.com/rs/zerolog v1.28.0
github.com/prometheus/client_golang v1.17.0
github.com/prometheus/client_model v0.5.0
github.com/quic-go/quic-go v0.40.1
github.com/rs/zerolog v1.29.0
github.com/sirupsen/logrus v1.9.3
github.com/spiffe/go-spiffe/v2 v2.1.1
github.com/stretchr/testify v1.8.4
github.com/stvp/go-udp-testing v0.0.0-20191102171040-06b61409b154
github.com/tailscale/tscert v0.0.0-20220316030059-54bbcb9f74e2
github.com/testcontainers/testcontainers-go v0.27.0
github.com/tetratelabs/wazero v1.5.0
github.com/tidwall/gjson v1.17.0
github.com/traefik/grpc-web v0.16.0
github.com/traefik/paerser v0.2.0
github.com/traefik/yaegi v0.15.1
github.com/uber/jaeger-client-go v2.30.0+incompatible
github.com/uber/jaeger-lib v2.2.0+incompatible
github.com/unrolled/render v1.0.2
github.com/unrolled/secure v1.0.9
github.com/vdemeester/shakers v0.1.0
github.com/vulcand/oxy/v2 v2.0.0-20230427132221-be5cf38f3c1c
github.com/vulcand/predicate v1.2.0
go.elastic.co/apm v1.13.1
go.elastic.co/apm/module/apmot v1.13.1
go.opentelemetry.io/collector/pdata v0.66.0
go.opentelemetry.io/otel v1.19.0
go.opentelemetry.io/otel/bridge/opentracing v1.19.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.42.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.42.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0
go.opentelemetry.io/otel/metric v1.19.0
go.opentelemetry.io/otel/sdk v1.19.0
go.opentelemetry.io/otel/sdk/metric v1.19.0
go.opentelemetry.io/otel/trace v1.19.0
golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
golang.org/x/mod v0.12.0
go.opentelemetry.io/otel v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.44.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.44.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0
go.opentelemetry.io/otel/metric v1.21.0
go.opentelemetry.io/otel/sdk v1.21.0
go.opentelemetry.io/otel/sdk/metric v1.21.0
go.opentelemetry.io/otel/trace v1.21.0
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/mod v0.13.0
golang.org/x/net v0.17.0
golang.org/x/text v0.13.0
golang.org/x/time v0.3.0
golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846
google.golang.org/grpc v1.58.3
gopkg.in/DataDog/dd-trace-go.v1 v1.56.1
gopkg.in/fsnotify.v1 v1.4.7
golang.org/x/tools v0.14.0
google.golang.org/grpc v1.59.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.26.3
k8s.io/apiextensions-apiserver v0.26.3
k8s.io/apimachinery v0.26.3
k8s.io/client-go v0.26.3
k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
k8s.io/api v0.28.3
k8s.io/apiextensions-apiserver v0.28.3
k8s.io/apimachinery v0.28.3
k8s.io/client-go v0.28.3
k8s.io/utils v0.0.0-20230726121419-3b25d923346b
mvdan.cc/xurls/v2 v2.5.0
sigs.k8s.io/gateway-api v0.4.0
sigs.k8s.io/gateway-api v1.0.0
)
require (
cloud.google.com/go/compute v1.23.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
dario.cat/mergo v1.0.0 // indirect
github.com/AdamSLevy/jsonrpc2/v14 v14.1.0 // indirect
github.com/AlecAivazis/survey/v2 v2.2.3 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 // indirect
@ -129,26 +116,17 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.0.0 // indirect
github.com/DataDog/appsec-internal-go v1.0.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.48.0-devel.0.20230725154044-2549ba9058df // indirect
github.com/DataDog/datadog-go/v5 v5.3.0 // indirect
github.com/DataDog/go-libddwaf v1.5.0 // indirect
github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.2 // indirect
github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.2.0 // indirect
github.com/Masterminds/semver/v3 v3.2.1 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.8.25 // indirect
github.com/Microsoft/hcsshim v0.11.4 // indirect
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87 // indirect
github.com/VividCortex/gohistogram v1.0.0 // indirect
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
github.com/akamai/AkamaiOPEN-edgegrid-golang v1.2.2 // indirect
github.com/aliyun/alibaba-cloud-sdk-go v1.61.1755 // indirect
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2 v1.20.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.18.28 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.27 // indirect
@ -165,77 +143,59 @@ require (
github.com/aws/smithy-go v1.14.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/buger/goterm v1.0.0 // indirect
github.com/bytedance/sonic v1.10.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/civo/civogo v0.3.11 // indirect
github.com/cloudflare/cloudflare-go v0.70.0 // indirect
github.com/compose-spec/godotenv v1.0.0 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/console v1.0.3 // indirect
github.com/containerd/containerd v1.5.17 // indirect
github.com/containerd/continuity v0.3.0 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 // indirect
github.com/containerd/containerd v1.7.11 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpu/goacmedns v0.1.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
github.com/deepmap/oapi-codegen v1.9.1 // indirect
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e // indirect
github.com/dnsimple/dnsimple-go v1.2.0 // indirect
github.com/docker/buildx v0.5.2-0.20210422185057-908a856079fc // indirect
github.com/docker/distribution v2.8.2+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.4-0.20210125172408-38bea2ce277a // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/ebitengine/purego v0.5.0-alpha.1 // indirect
github.com/elastic/go-licenser v0.3.1 // indirect
github.com/elastic/go-sysinfo v1.1.1 // indirect
github.com/elastic/go-windows v1.0.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/evanphx/json-patch v5.7.0+incompatible // indirect
github.com/exoscale/egoscale v0.100.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fvbommel/sortorder v1.0.1 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/gin-gonic/gin v1.9.1 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-playground/validator/v10 v10.15.1 // indirect
github.com/go-resty/resty/v2 v2.7.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
github.com/gofrs/flock v0.8.0 // indirect
github.com/gogo/googleapis v1.4.0 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/mock v1.6.0 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect
github.com/google/s2a-go v0.1.5 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/uuid v1.3.1 // indirect
github.com/google/uuid v1.4.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect
github.com/googleapis/gax-go/v2 v2.11.0 // indirect
github.com/gophercloud/gophercloud v1.0.0 // indirect
github.com/gophercloud/utils v0.0.0-20210216074907-f6de111f2eae // indirect
github.com/gravitational/trace v1.1.16-0.20220114165159-14a9a7dd6aaf // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/cronexpr v1.1.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@ -245,19 +205,15 @@ require (
github.com/hashicorp/serf v0.10.1 // indirect
github.com/huandu/xstrings v1.4.0 // indirect
github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/imdario/mergo v0.3.16 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/infobloxopen/infoblox-go-client v1.1.1 // indirect
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea // indirect
github.com/jcchavezs/porto v0.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
github.com/jonboulle/clockwork v0.2.2 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/labbsr0x/bindman-dns-webhook v1.0.2 // indirect
@ -266,27 +222,22 @@ require (
github.com/liquidweb/go-lwApi v0.0.5 // indirect
github.com/liquidweb/liquidweb-cli v0.6.9 // indirect
github.com/liquidweb/liquidweb-go v1.6.3 // indirect
github.com/looplab/fsm v0.1.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailgun/minheap v0.0.0-20170619185613-3dbe6c6bf55f // indirect
github.com/mailgun/multibuf v0.1.2 // indirect
github.com/mailgun/timetools v0.0.0-20141028012446-7e6055773c51 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect
github.com/miekg/pkcs11 v1.0.3 // indirect
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mimuret/golang-iij-dpf v0.9.1 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-ps v1.0.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/buildkit v0.8.2-0.20210401015549-df49b648c8bf // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mount v0.2.0 // indirect
github.com/moby/sys/mountinfo v0.5.0 // indirect
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/morikuni/aec v1.0.0 // indirect
@ -301,103 +252,93 @@ require (
github.com/nrdcg/nodion v0.1.0 // indirect
github.com/nrdcg/porkbun v0.2.0 // indirect
github.com/nzdjb/go-metaname v1.0.0 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/onsi/ginkgo/v2 v2.11.0 // indirect
github.com/onsi/gomega v1.27.10 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
github.com/opencontainers/runc v1.1.5 // indirect
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/oracle/oci-go-sdk v24.3.0+incompatible // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/ovh/go-ovh v1.4.1 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/pquerna/otp v1.4.0 // indirect
github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect
github.com/prometheus/common v0.45.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/quic-go/qpack v0.4.0 // indirect
github.com/quic-go/qtls-go1-20 v0.3.4 // indirect
github.com/quic-go/qtls-go1-20 v0.4.1 // indirect
github.com/redis/go-redis/v9 v9.2.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/sacloud/api-client-go v0.2.8 // indirect
github.com/sacloud/go-http v0.1.6 // indirect
github.com/sacloud/iaas-api-go v1.11.1 // indirect
github.com/sacloud/packages-go v0.0.9 // indirect
github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect
github.com/santhosh-tekuri/jsonschema v1.2.4 // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.17 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/shirou/gopsutil/v3 v3.23.11 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/simplesurance/bunny-go v0.0.0-20221115111006-e11d9dc91f04 // indirect
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect
github.com/softlayer/softlayer-go v1.1.2 // indirect
github.com/softlayer/xmlrpc v0.0.0-20200409220501-5f089df7cb7e // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v1.6.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.1 // indirect
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.490 // indirect
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dnspod v1.0.490 // indirect
github.com/theupdateframework/notary v0.6.1 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/transip/gotransip/v6 v6.20.0 // indirect
github.com/ultradns/ultradns-go-sdk v1.5.0-20230427130837-23c9b0c // indirect
github.com/vinyldns/go-vinyldns v0.9.16 // indirect
github.com/vultr/govultr/v2 v2.17.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/yandex-cloud/go-genproto v0.0.0-20220805142335-27b56ddae16f // indirect
github.com/yandex-cloud/go-sdk v0.0.0-20220805164847-cf028e604997 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
github.com/zeebo/errs v1.2.2 // indirect
go.elastic.co/apm/module/apmhttp v1.13.1 // indirect
go.elastic.co/fastjson v1.1.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.5 // indirect
go.etcd.io/etcd/client/v3 v3.5.5 // indirect
go.etcd.io/etcd/api/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect
go.etcd.io/etcd/client/v3 v3.5.9 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.42.0 // indirect
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.3.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/ratelimit v0.2.0 // indirect
go.uber.org/zap v1.21.0 // indirect
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect
go.uber.org/zap v1.26.0 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.13.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect
golang.org/x/sys v0.15.0 // indirect
golang.org/x/term v0.13.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/api v0.128.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/h2non/gock.v1 v1.0.16 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/ns1/ns1-go.v2 v2.7.6 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect
k8s.io/klog/v2 v2.80.1 // indirect
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
nhooyr.io/websocket v1.8.7 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)
// Containous forks
replace (
github.com/abbot/go-http-auth => github.com/containous/go-http-auth v0.4.1-0.20200324110947-a37a7636d23e
github.com/go-check/check => github.com/containous/check v0.0.0-20170915194414-ca0bf163426a
github.com/gorilla/mux => github.com/containous/mux v0.0.0-20220627093034-b2dd784e613f
github.com/mailgun/minheap => github.com/containous/minheap v0.0.0-20190809180810-6e71eb837595
)
@ -407,3 +348,6 @@ replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402
// ambiguous import: found package github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http in multiple modules
// tencentcloud uses monorepo with multimodule but the go.mod files are incomplete.
exclude github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible
// https://github.com/docker/compose/blob/v2.19.0/go.mod#L12
replace github.com/cucumber/godog => github.com/cucumber/godog v0.13.0

go.sum

File diff suppressed because it is too large.

View file

@ -11,13 +11,15 @@ import (
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/go-check/check"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/middlewares/accesslog"
checker "github.com/vdemeester/shakers"
)
const (
@ -28,6 +30,10 @@ const (
// AccessLogSuite tests suite.
type AccessLogSuite struct{ BaseSuite }
func TestAccessLogSuite(t *testing.T) {
suite.Run(t, new(AccessLogSuite))
}
type accessLogValue struct {
formatOnly bool
code string
@ -36,67 +42,67 @@ type accessLogValue struct {
serviceURL string
}
func (s *AccessLogSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "access_log")
s.composeUp(c)
func (s *AccessLogSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("access_log")
s.composeUp()
}
func (s *AccessLogSuite) TearDownTest(c *check.C) {
displayTraefikLogFile(c, traefikTestLogFile)
func (s *AccessLogSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *AccessLogSuite) TearDownTest() {
s.displayTraefikLogFile(traefikTestLogFile)
_ = os.Remove(traefikTestAccessLogFile)
}
func (s *AccessLogSuite) TestAccessLog(c *check.C) {
func (s *AccessLogSuite) TestAccessLog() {
ensureWorkingDirectoryIsClean()
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer func() {
traefikLog, err := os.ReadFile(traefikTestLogFile)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
log.Info().Msg(string(traefikLog))
}()
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.waitForTraefik("server1")
waitForTraefik(c, "server1")
checkStatsForLogFile(c)
s.checkStatsForLogFile()
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Make some requests
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend1.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend2.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogOutput(c)
count := s.checkAccessLogOutput()
c.Assert(count, checker.GreaterOrEqualThan, 3)
assert.Equal(s.T(), 3, count)
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogAuthFrontend(c *check.C) {
func (s *AccessLogSuite) TestAccessLogAuthFrontend() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -124,48 +130,43 @@ func (s *AccessLogSuite) TestAccessLogAuthFrontend(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "authFrontend")
s.waitForTraefik("authFrontend")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test auth entrypoint
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8006/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend.auth.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusUnauthorized), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.SetBasicAuth("test", "")
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusUnauthorized), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.SetBasicAuth("test", "test")
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogDigestAuthMiddleware(c *check.C) {
func (s *AccessLogSuite) TestAccessLogDigestAuthMiddleware() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -193,27 +194,22 @@ func (s *AccessLogSuite) TestAccessLogDigestAuthMiddleware(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "digestAuthMiddleware")
s.waitForTraefik("digestAuthMiddleware")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test auth entrypoint
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8008/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "entrypoint.digest.auth.docker.local"
resp, err := try.ResponseUntilStatusCode(req, 500*time.Millisecond, http.StatusUnauthorized)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
digest := digestParts(resp)
digest["uri"] = "/"
@ -225,22 +221,22 @@ func (s *AccessLogSuite) TestAccessLogDigestAuthMiddleware(c *check.C) {
req.Header.Set("Content-Type", "application/json")
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusUnauthorized), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
digest["password"] = "test"
req.Header.Set("Authorization", getDigestAuthorization(digest))
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
// Thanks to mvndaai for digest authentication
@ -291,7 +287,7 @@ func getDigestAuthorization(digestParts map[string]string) string {
return authorization
}
func (s *AccessLogSuite) TestAccessLogFrontendRedirect(c *check.C) {
func (s *AccessLogSuite) TestAccessLogFrontendRedirect() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -308,38 +304,33 @@ func (s *AccessLogSuite) TestAccessLogFrontendRedirect(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "frontendRedirect")
s.waitForTraefik("frontendRedirect")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test frontend redirect
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8005/test", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = ""
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogJSONFrontendRedirect(c *check.C) {
func (s *AccessLogSuite) TestAccessLogJSONFrontendRedirect() {
ensureWorkingDirectoryIsClean()
type logLine struct {
@ -365,30 +356,25 @@ func (s *AccessLogSuite) TestAccessLogJSONFrontendRedirect(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_json_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_json_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "frontendRedirect")
s.waitForTraefik("frontendRedirect")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test frontend redirect
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8005/test", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = ""
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
lines := extractLines(c)
c.Assert(len(lines), checker.GreaterOrEqualThan, len(expected))
lines := s.extractLines()
assert.GreaterOrEqual(s.T(), len(lines), len(expected))
for i, line := range lines {
if line == "" {
@ -396,15 +382,15 @@ func (s *AccessLogSuite) TestAccessLogJSONFrontendRedirect(c *check.C) {
}
var logline logLine
err := json.Unmarshal([]byte(line), &logline)
c.Assert(err, checker.IsNil)
c.Assert(logline.DownstreamStatus, checker.Equals, expected[i].DownstreamStatus)
c.Assert(logline.OriginStatus, checker.Equals, expected[i].OriginStatus)
c.Assert(logline.RouterName, checker.Equals, expected[i].RouterName)
c.Assert(logline.ServiceName, checker.Equals, expected[i].ServiceName)
require.NoError(s.T(), err)
assert.Equal(s.T(), expected[i].DownstreamStatus, logline.DownstreamStatus)
assert.Equal(s.T(), expected[i].OriginStatus, logline.OriginStatus)
assert.Equal(s.T(), expected[i].RouterName, logline.RouterName)
assert.Equal(s.T(), expected[i].ServiceName, logline.ServiceName)
}
}
func (s *AccessLogSuite) TestAccessLogRateLimit(c *check.C) {
func (s *AccessLogSuite) TestAccessLogRateLimit() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -424,42 +410,37 @@ func (s *AccessLogSuite) TestAccessLogRateLimit(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "rateLimit")
s.waitForTraefik("rateLimit")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test rate limit
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8007/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "ratelimit.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusTooManyRequests), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogBackendNotFound(c *check.C) {
func (s *AccessLogSuite) TestAccessLogBackendNotFound() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -473,38 +454,33 @@ func (s *AccessLogSuite) TestAccessLogBackendNotFound(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.waitForTraefik("server1")
waitForTraefik(c, "server1")
checkStatsForLogFile(c)
s.checkStatsForLogFile()
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test rate limit
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "backendnotfound.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogFrontendAllowlist(c *check.C) {
func (s *AccessLogSuite) TestAccessLogFrontendAllowlist() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -518,38 +494,33 @@ func (s *AccessLogSuite) TestAccessLogFrontendAllowlist(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "frontendAllowlist")
s.waitForTraefik("frontendAllowlist")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test rate limit
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend.allowlist.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusForbidden), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogAuthFrontendSuccess(c *check.C) {
func (s *AccessLogSuite) TestAccessLogAuthFrontendSuccess() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -563,39 +534,34 @@ func (s *AccessLogSuite) TestAccessLogAuthFrontendSuccess(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "authFrontend")
s.waitForTraefik("authFrontend")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test auth entrypoint
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8006/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend.auth.docker.local"
req.SetBasicAuth("test", "test")
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func (s *AccessLogSuite) TestAccessLogPreflightHeadersMiddleware(c *check.C) {
func (s *AccessLogSuite) TestAccessLogPreflightHeadersMiddleware() {
ensureWorkingDirectoryIsClean()
expected := []accessLogValue{
@ -609,79 +575,74 @@ func (s *AccessLogSuite) TestAccessLogPreflightHeadersMiddleware(c *check.C) {
}
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.checkStatsForLogFile()
checkStatsForLogFile(c)
waitForTraefik(c, "preflightCORS")
s.waitForTraefik("preflightCORS")
// Verify Traefik started OK
checkTraefikStarted(c)
s.checkTraefikStarted()
// Test preflight response
req, err := http.NewRequest(http.MethodOptions, "http://127.0.0.1:8009/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "preflight.docker.local"
req.Header.Set("Origin", "whatever")
req.Header.Set("Access-Control-Request-Method", "GET")
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log output as expected
count := checkAccessLogExactValuesOutput(c, expected)
count := s.checkAccessLogExactValuesOutput(expected)
c.Assert(count, checker.GreaterOrEqualThan, len(expected))
assert.GreaterOrEqual(s.T(), count, len(expected))
// Verify no other Traefik problems
checkNoOtherTraefikProblems(c)
s.checkNoOtherTraefikProblems()
}
func checkNoOtherTraefikProblems(c *check.C) {
func (s *AccessLogSuite) checkNoOtherTraefikProblems() {
traefikLog, err := os.ReadFile(traefikTestLogFile)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if len(traefikLog) > 0 {
fmt.Printf("%s\n", string(traefikLog))
}
}
func checkAccessLogOutput(c *check.C) int {
lines := extractLines(c)
func (s *AccessLogSuite) checkAccessLogOutput() int {
lines := s.extractLines()
count := 0
for i, line := range lines {
if len(line) > 0 {
count++
CheckAccessLogFormat(c, line, i)
s.CheckAccessLogFormat(line, i)
}
}
return count
}
func checkAccessLogExactValuesOutput(c *check.C, values []accessLogValue) int {
lines := extractLines(c)
func (s *AccessLogSuite) checkAccessLogExactValuesOutput(values []accessLogValue) int {
lines := s.extractLines()
count := 0
for i, line := range lines {
fmt.Println(line)
if len(line) > 0 {
count++
if values[i].formatOnly {
CheckAccessLogFormat(c, line, i)
s.CheckAccessLogFormat(line, i)
} else {
checkAccessLogExactValues(c, line, i, values[i])
s.checkAccessLogExactValues(line, i, values[i])
}
}
}
return count
}
func extractLines(c *check.C) []string {
func (s *AccessLogSuite) extractLines() []string {
accessLog, err := os.ReadFile(traefikTestAccessLogFile)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
lines := strings.Split(string(accessLog), "\n")
@ -694,14 +655,14 @@ func extractLines(c *check.C) []string {
return clean
}
func checkStatsForLogFile(c *check.C) {
func (s *AccessLogSuite) checkStatsForLogFile() {
err := try.Do(1*time.Second, func() error {
if _, errStat := os.Stat(traefikTestLogFile); errStat != nil {
return fmt.Errorf("could not get stats for log file: %w", errStat)
}
return nil
})
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func ensureWorkingDirectoryIsClean() {
@ -709,69 +670,38 @@ func ensureWorkingDirectoryIsClean() {
os.Remove(traefikTestLogFile)
}
func checkTraefikStarted(c *check.C) []byte {
func (s *AccessLogSuite) checkTraefikStarted() []byte {
traefikLog, err := os.ReadFile(traefikTestLogFile)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if len(traefikLog) > 0 {
fmt.Printf("%s\n", string(traefikLog))
}
return traefikLog
}
func CheckAccessLogFormat(c *check.C, line string, i int) {
func (s *BaseSuite) CheckAccessLogFormat(line string, i int) {
results, err := accesslog.ParseAccessLog(line)
c.Assert(err, checker.IsNil)
c.Assert(results, checker.HasLen, 14)
c.Assert(results[accesslog.OriginStatus], checker.Matches, `^(-|\d{3})$`)
require.NoError(s.T(), err)
assert.Len(s.T(), results, 14)
assert.Regexp(s.T(), `^(-|\d{3})$`, results[accesslog.OriginStatus])
count, _ := strconv.Atoi(results[accesslog.RequestCount])
c.Assert(count, checker.GreaterOrEqualThan, i+1)
c.Assert(results[accesslog.RouterName], checker.Matches, `"(rt-.+@docker|api@internal)"`)
c.Assert(results[accesslog.ServiceURL], checker.HasPrefix, `"http://`)
c.Assert(results[accesslog.Duration], checker.Matches, `^\d+ms$`)
assert.GreaterOrEqual(s.T(), count, i+1)
assert.Regexp(s.T(), `"(rt-.+@docker|api@internal)"`, results[accesslog.RouterName])
assert.True(s.T(), strings.HasPrefix(results[accesslog.ServiceURL], `"http://`))
assert.Regexp(s.T(), `^\d+ms$`, results[accesslog.Duration])
}
func checkAccessLogExactValues(c *check.C, line string, i int, v accessLogValue) {
func (s *AccessLogSuite) checkAccessLogExactValues(line string, i int, v accessLogValue) {
results, err := accesslog.ParseAccessLog(line)
c.Assert(err, checker.IsNil)
c.Assert(results, checker.HasLen, 14)
require.NoError(s.T(), err)
assert.Len(s.T(), results, 14)
if len(v.user) > 0 {
c.Assert(results[accesslog.ClientUsername], checker.Equals, v.user)
assert.Equal(s.T(), v.user, results[accesslog.ClientUsername])
}
c.Assert(results[accesslog.OriginStatus], checker.Equals, v.code)
assert.Equal(s.T(), v.code, results[accesslog.OriginStatus])
count, _ := strconv.Atoi(results[accesslog.RequestCount])
c.Assert(count, checker.GreaterOrEqualThan, i+1)
c.Assert(results[accesslog.RouterName], checker.Matches, `^"?`+v.routerName+`.*(@docker)?$`)
c.Assert(results[accesslog.ServiceURL], checker.Matches, `^"?`+v.serviceURL+`.*$`)
c.Assert(results[accesslog.Duration], checker.Matches, `^\d+ms$`)
}
func waitForTraefik(c *check.C, containerName string) {
time.Sleep(1 * time.Second)
// Wait for Traefik to turn ready.
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/rawdata", nil)
c.Assert(err, checker.IsNil)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContains(containerName))
c.Assert(err, checker.IsNil)
}
func displayTraefikLogFile(c *check.C, path string) {
if c.Failed() {
if _, err := os.Stat(path); !os.IsNotExist(err) {
content, errRead := os.ReadFile(path)
fmt.Printf("%s: Traefik logs: \n", c.TestName())
if errRead == nil {
fmt.Println(content)
} else {
fmt.Println(errRead)
}
} else {
fmt.Printf("%s: No Traefik logs.\n", c.TestName())
}
errRemove := os.Remove(path)
if errRemove != nil {
fmt.Println(errRemove)
}
}
assert.GreaterOrEqual(s.T(), count, i+1)
assert.Regexp(s.T(), `^"?`+v.routerName+`.*(@docker)?$`, results[accesslog.RouterName])
assert.Regexp(s.T(), `^"?`+v.serviceURL+`.*$`, results[accesslog.ServiceURL])
assert.Regexp(s.T(), `^\d+ms$`, results[accesslog.Duration])
}
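
The hunks above migrate the access-log suite from go-check (`c.Assert` with `checker` matchers) to testify's `suite`, `require`, and `assert` packages. As a minimal sketch of the resulting pattern, assuming a hypothetical suite name and trivial assertions:

```go
package integration

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/stretchr/testify/suite"
)

// ExampleSuite embeds suite.Suite, the structure the migrated tests now share.
type ExampleSuite struct {
	suite.Suite
}

// TestExampleSuite is the single entry point discovered by `go test`,
// replacing go-check's suite registration.
func TestExampleSuite(t *testing.T) {
	suite.Run(t, new(ExampleSuite))
}

// SetupSuite runs once before the tests, the counterpart of go-check's SetUpSuite(c *check.C).
func (s *ExampleSuite) SetupSuite() {
	// shared fixtures (compose projects, config files, ...) would be created here
}

// TestSomething shows the assertion style used above: require aborts the test
// on failure, assert records it and keeps going.
func (s *ExampleSuite) TestSomething() {
	var err error
	require.NoError(s.T(), err)
	assert.Equal(s.T(), 200, 200)
}
```

The same shape (a `Test*Suite` entry point plus `Setup*`/`TearDown*` methods on the suite struct) repeats in the ACME, throttling, and Consul catalog suites below.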

View file

@ -8,16 +8,19 @@ import (
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/go-check/check"
"github.com/miekg/dns"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/config/static"
"github.com/traefik/traefik/v3/pkg/provider/acme"
"github.com/traefik/traefik/v3/pkg/testhelpers"
"github.com/traefik/traefik/v3/pkg/types"
checker "github.com/vdemeester/shakers"
)
// ACME test suites.
@ -27,6 +30,10 @@ type AcmeSuite struct {
fakeDNSServer *dns.Server
}
func TestAcmeSuite(t *testing.T) {
suite.Run(t, new(AcmeSuite))
}
type subCases struct {
host string
expectedCommonName string
@ -87,17 +94,18 @@ func setupPebbleRootCA() (*http.Transport, error) {
}, nil
}
func (s *AcmeSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "pebble")
s.composeUp(c)
func (s *AcmeSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.fakeDNSServer = startFakeDNSServer(s.getContainerIP(c, "traefik"))
s.pebbleIP = s.getComposeServiceIP(c, "pebble")
s.createComposeProject("pebble")
s.composeUp()
// Retrieving the Docker host ip.
s.fakeDNSServer = startFakeDNSServer(s.hostIP)
s.pebbleIP = s.getComposeServiceIP("pebble")
pebbleTransport, err := setupPebbleRootCA()
if err != nil {
c.Fatal(err)
}
require.NoError(s.T(), err)
// wait for pebble
req := testhelpers.MustNewRequest(http.MethodGet, s.getAcmeURL(), nil)
@ -113,21 +121,24 @@ func (s *AcmeSuite) SetUpSuite(c *check.C) {
}
return try.StatusCodeIs(http.StatusOK)(resp)
})
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *AcmeSuite) TearDownSuite(c *check.C) {
func (s *AcmeSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
if s.fakeDNSServer != nil {
err := s.fakeDNSServer.Shutdown()
if err != nil {
c.Log(err)
log.Info().Msg(err.Error())
}
}
s.composeDown(c)
s.composeDown()
}
func (s *AcmeSuite) TestHTTP01Domains(c *check.C) {
func (s *AcmeSuite) TestHTTP01Domains() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_domains.toml",
subCases: []subCases{{
@ -147,10 +158,10 @@ func (s *AcmeSuite) TestHTTP01Domains(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01StoreDomains(c *check.C) {
func (s *AcmeSuite) TestHTTP01StoreDomains() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_store_domains.toml",
subCases: []subCases{{
@ -170,10 +181,10 @@ func (s *AcmeSuite) TestHTTP01StoreDomains(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01DomainsInSAN(c *check.C) {
func (s *AcmeSuite) TestHTTP01DomainsInSAN() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_domains.toml",
subCases: []subCases{{
@ -194,10 +205,10 @@ func (s *AcmeSuite) TestHTTP01DomainsInSAN(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01OnHostRule(c *check.C) {
func (s *AcmeSuite) TestHTTP01OnHostRule() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_base.toml",
subCases: []subCases{{
@ -214,10 +225,10 @@ func (s *AcmeSuite) TestHTTP01OnHostRule(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestMultipleResolver(c *check.C) {
func (s *AcmeSuite) TestMultipleResolver() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_multiple_resolvers.toml",
subCases: []subCases{
@ -245,10 +256,10 @@ func (s *AcmeSuite) TestMultipleResolver(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01OnHostRuleECDSA(c *check.C) {
func (s *AcmeSuite) TestHTTP01OnHostRuleECDSA() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_base.toml",
subCases: []subCases{{
@ -266,10 +277,10 @@ func (s *AcmeSuite) TestHTTP01OnHostRuleECDSA(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01OnHostRuleInvalidAlgo(c *check.C) {
func (s *AcmeSuite) TestHTTP01OnHostRuleInvalidAlgo() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_base.toml",
subCases: []subCases{{
@ -287,10 +298,10 @@ func (s *AcmeSuite) TestHTTP01OnHostRuleInvalidAlgo(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01OnHostRuleDefaultDynamicCertificatesWithWildcard(c *check.C) {
func (s *AcmeSuite) TestHTTP01OnHostRuleDefaultDynamicCertificatesWithWildcard() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_tls.toml",
subCases: []subCases{{
@ -307,10 +318,10 @@ func (s *AcmeSuite) TestHTTP01OnHostRuleDefaultDynamicCertificatesWithWildcard(c
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestHTTP01OnHostRuleDynamicCertificatesWithWildcard(c *check.C) {
func (s *AcmeSuite) TestHTTP01OnHostRuleDynamicCertificatesWithWildcard() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_tls_dynamic.toml",
subCases: []subCases{{
@ -327,10 +338,10 @@ func (s *AcmeSuite) TestHTTP01OnHostRuleDynamicCertificatesWithWildcard(c *check
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestTLSALPN01OnHostRuleTCP(c *check.C) {
func (s *AcmeSuite) TestTLSALPN01OnHostRuleTCP() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_tcp.toml",
subCases: []subCases{{
@ -347,10 +358,10 @@ func (s *AcmeSuite) TestTLSALPN01OnHostRuleTCP(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestTLSALPN01OnHostRule(c *check.C) {
func (s *AcmeSuite) TestTLSALPN01OnHostRule() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_base.toml",
subCases: []subCases{{
@ -367,10 +378,10 @@ func (s *AcmeSuite) TestTLSALPN01OnHostRule(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestTLSALPN01Domains(c *check.C) {
func (s *AcmeSuite) TestTLSALPN01Domains() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_domains.toml",
subCases: []subCases{{
@ -390,10 +401,10 @@ func (s *AcmeSuite) TestTLSALPN01Domains(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
func (s *AcmeSuite) TestTLSALPN01DomainsInSAN(c *check.C) {
func (s *AcmeSuite) TestTLSALPN01DomainsInSAN() {
testCase := acmeTestCase{
traefikConfFilePath: "fixtures/acme/acme_domains.toml",
subCases: []subCases{{
@ -414,12 +425,12 @@ func (s *AcmeSuite) TestTLSALPN01DomainsInSAN(c *check.C) {
},
}
s.retrieveAcmeCertificate(c, testCase)
s.retrieveAcmeCertificate(testCase)
}
// Test Let's encrypt down.
func (s *AcmeSuite) TestNoValidLetsEncryptServer(c *check.C) {
file := s.adaptFile(c, "fixtures/acme/acme_base.toml", templateModel{
func (s *AcmeSuite) TestNoValidLetsEncryptServer() {
file := s.adaptFile("fixtures/acme/acme_base.toml", templateModel{
Acme: map[string]static.CertificateResolver{
"default": {ACME: &acme.Configuration{
CAServer: "http://wrongurl:4001/directory",
@ -427,21 +438,16 @@ func (s *AcmeSuite) TestNoValidLetsEncryptServer(c *check.C) {
}},
},
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// Expected traefik works
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 10*time.Second, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
}
// Doing an HTTPS request and test the response certificate.
func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase acmeTestCase) {
func (s *AcmeSuite) retrieveAcmeCertificate(testCase acmeTestCase) {
if len(testCase.template.PortHTTP) == 0 {
testCase.template.PortHTTP = ":5002"
}
@ -456,14 +462,10 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase acmeTestCase) {
}
}
file := s.adaptFile(c, testCase.traefikConfFilePath, testCase.template)
defer os.Remove(file)
file := s.adaptFile(testCase.traefikConfFilePath, testCase.template)
s.traefikCmd(withConfigFile(file))
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
// A real file is needed to have the right mode on acme.json file
defer os.Remove("/tmp/acme.json")
@ -477,11 +479,11 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase acmeTestCase) {
}
// wait for traefik (generating acme account take some seconds)
err = try.Do(60*time.Second, func() error {
err := try.Do(60*time.Second, func() error {
_, errGet := client.Get("https://127.0.0.1:5001")
return errGet
})
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
for _, sub := range testCase.subCases {
client = &http.Client{
@ -503,7 +505,7 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase acmeTestCase) {
var resp *http.Response
// Retry to send a Request which uses the LE generated certificate
err = try.Do(60*time.Second, func() error {
err := try.Do(60*time.Second, func() error {
resp, err = client.Do(req)
if err != nil {
return err
@ -517,10 +519,10 @@ func (s *AcmeSuite) retrieveAcmeCertificate(c *check.C, testCase acmeTestCase) {
return nil
})
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusOK)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusOK, resp.StatusCode)
// Check Domain into response certificate
c.Assert(resp.TLS.PeerCertificates[0].Subject.CommonName, checker.Equals, sub.expectedCommonName)
c.Assert(resp.TLS.PeerCertificates[0].PublicKeyAlgorithm, checker.Equals, sub.expectedAlgorithm)
assert.Equal(s.T(), sub.expectedCommonName, resp.TLS.PeerCertificates[0].Subject.CommonName)
assert.Equal(s.T(), sub.expectedAlgorithm, resp.TLS.PeerCertificates[0].PublicKeyAlgorithm)
}
}
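
Throughout the ACME suite, readiness and certificate retrieval rely on the repository's `try` helpers. Judging from the calls visible in this diff, `try.Do` retries a closure until it returns nil or the timeout elapses, while `try.GetRequest`/`try.Request` combine that retry loop with response checks such as `try.StatusCodeIs`. A hedged usage sketch (URLs, timeouts, and the wrapper function are illustrative, not taken from the diff):

```go
package integration

import (
	"errors"
	"net/http"
	"time"

	"github.com/traefik/traefik/v3/integration/try"
)

// waitForEndpoint polls until the endpoint answers 200 OK or the time budget runs out.
// The helper names and signatures follow the calls shown in the diff above.
func waitForEndpoint() error {
	// Poll a URL with a response condition.
	if err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 10*time.Second, try.StatusCodeIs(http.StatusOK)); err != nil {
		return err
	}

	// Or retry an arbitrary operation until it stops returning an error.
	client := &http.Client{}
	return try.Do(60*time.Second, func() error {
		resp, err := client.Get("https://127.0.0.1:5001")
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return errors.New("unexpected status")
		}
		return nil
	})
}
```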

View file

@ -8,36 +8,38 @@ import (
"net/http"
"regexp"
"strconv"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/config/dynamic"
checker "github.com/vdemeester/shakers"
)
type ThrottlingSuite struct{ BaseSuite }
func (s *ThrottlingSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "rest")
s.composeUp(c)
func TestThrottlingSuite(t *testing.T) {
suite.Run(t, new(ThrottlingSuite))
}
func (s *ThrottlingSuite) TestThrottleConfReload(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/throttling/simple.toml"))
func (s *ThrottlingSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("rest")
s.composeUp()
}
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *ThrottlingSuite) TestThrottleConfReload() {
s.traefikCmd(withConfigFile("fixtures/throttling/simple.toml"))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1000*time.Millisecond, try.BodyContains("rest@internal"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1000*time.Millisecond, try.BodyContains("rest@internal"))
require.NoError(s.T(), err)
// Expected a 404 as we did not configure anything.
err = try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
config := &dynamic.Configuration{
HTTP: &dynamic.HTTPConfiguration{
@ -47,7 +49,7 @@ func (s *ThrottlingSuite) TestThrottleConfReload(c *check.C) {
LoadBalancer: &dynamic.ServersLoadBalancer{
Servers: []dynamic.Server{
{
URL: "http://" + s.getComposeServiceIP(c, "whoami1") + ":80",
URL: "http://" + s.getComposeServiceIP("whoami1") + ":80",
},
},
},
@ -68,28 +70,28 @@ func (s *ThrottlingSuite) TestThrottleConfReload(c *check.C) {
for i := 0; i < confChanges; i++ {
config.HTTP.Routers[fmt.Sprintf("routerHTTP%d", i)] = router
data, err := json.Marshal(config)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
request, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8080/api/providers/rest", bytes.NewReader(data))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
response, err := http.DefaultClient.Do(request)
c.Assert(err, checker.IsNil)
c.Assert(response.StatusCode, checker.Equals, http.StatusOK)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusOK, response.StatusCode)
time.Sleep(200 * time.Millisecond)
}
reloadsRegexp := regexp.MustCompile(`traefik_config_reloads_total (\d*)\n`)
resp, err := http.Get("http://127.0.0.1:8080/metrics")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
fields := reloadsRegexp.FindStringSubmatch(string(body))
c.Assert(len(fields), checker.Equals, 2)
assert.Len(s.T(), fields, 2)
reloads, err := strconv.Atoi(fields[1])
if err != nil {
@ -101,5 +103,5 @@ func (s *ThrottlingSuite) TestThrottleConfReload(c *check.C) {
// Therefore the throttling (set at 400ms for this test) should only let
// (2s / 400 ms =) 5 config reloads happen in theory.
// In addition, we have to take into account the extra config reload from the internal provider (5 + 1).
c.Assert(reloads, checker.LessOrEqualThan, 6)
assert.LessOrEqual(s.T(), reloads, 6)
}


@ -4,13 +4,14 @@ import (
"fmt"
"net"
"net/http"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
type ConsulCatalogSuite struct {
@ -20,26 +21,36 @@ type ConsulCatalogSuite struct {
consulURL string
}
func (s *ConsulCatalogSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "consul_catalog")
s.composeUp(c)
func TestConsulCatalogSuite(t *testing.T) {
suite.Run(t, new(ConsulCatalogSuite))
}
s.consulURL = "http://" + net.JoinHostPort(s.getComposeServiceIP(c, "consul"), "8500")
func (s *ConsulCatalogSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("consul_catalog")
s.composeUp()
s.consulURL = "http://" + net.JoinHostPort(s.getComposeServiceIP("consul"), "8500")
var err error
s.consulClient, err = api.NewClient(&api.Config{
Address: s.consulURL,
})
c.Check(err, check.IsNil)
require.NoError(s.T(), err)
// Wait for consul to elect itself leader
err = s.waitToElectConsulLeader()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
s.consulAgentClient, err = api.NewClient(&api.Config{
Address: "http://" + net.JoinHostPort(s.getComposeServiceIP(c, "consul-agent"), "8500"),
Address: "http://" + net.JoinHostPort(s.getComposeServiceIP("consul-agent"), "8500"),
})
c.Check(err, check.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *ConsulCatalogSuite) waitToElectConsulLeader() error {
@ -83,36 +94,36 @@ func (s *ConsulCatalogSuite) deregisterService(id string, onAgent bool) error {
return client.Agent().ServiceDeregister(id)
}
func (s *ConsulCatalogSuite) TestWithNotExposedByDefaultAndDefaultsSettings(c *check.C) {
func (s *ConsulCatalogSuite) TestWithNotExposedByDefaultAndDefaultsSettings() {
reg1 := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
Tags: []string{"traefik.enable=true"},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg1, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
reg2 := &api.AgentServiceRegistration{
ID: "whoami2",
Name: "whoami",
Tags: []string{"traefik.enable=true"},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami2"),
Address: s.getComposeServiceIP("whoami2"),
}
err = s.registerService(reg2, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
reg3 := &api.AgentServiceRegistration{
ID: "whoami3",
Name: "whoami",
Tags: []string{"traefik.enable=true"},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami3"),
Address: s.getComposeServiceIP("whoami3"),
}
err = s.registerService(reg3, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
@ -120,23 +131,18 @@ func (s *ConsulCatalogSuite) TestWithNotExposedByDefaultAndDefaultsSettings(c *c
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "whoami"
err = try.Request(req, 2*time.Second,
try.StatusCodeIs(200),
try.BodyContainsOr("Hostname: whoami1", "Hostname: whoami2", "Hostname: whoami3"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
try.StatusCodeIs(200),
@ -145,18 +151,18 @@ func (s *ConsulCatalogSuite) TestWithNotExposedByDefaultAndDefaultsSettings(c *c
fmt.Sprintf(`"http://%s:80":"UP"`, reg2.Address),
fmt.Sprintf(`"http://%s:80":"UP"`, reg3.Address),
))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami2", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami3", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestByLabels(c *check.C) {
containerIP := s.getComposeServiceIP(c, "whoami1")
func (s *ConsulCatalogSuite) TestByLabels() {
containerIP := s.getComposeServiceIP("whoami1")
reg := &api.AgentServiceRegistration{
ID: "whoami1",
@ -171,7 +177,7 @@ func (s *ConsulCatalogSuite) TestByLabels(c *check.C) {
Address: containerIP,
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
@ -179,23 +185,18 @@ func (s *ConsulCatalogSuite) TestByLabels(c *check.C) {
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8000/whoami", 5*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContainsOr("Hostname: whoami1", "Hostname: whoami2", "Hostname: whoami3"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) {
func (s *ConsulCatalogSuite) TestSimpleConfiguration() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -204,37 +205,32 @@ func (s *ConsulCatalogSuite) TestSimpleConfiguration(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
reg := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
Tags: []string{"traefik.enable=true"},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "whoami.consul.localhost"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami1"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestSimpleConfigurationWithWatch(c *check.C) {
func (s *ConsulCatalogSuite) TestSimpleConfigurationWithWatch() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -243,39 +239,34 @@ func (s *ConsulCatalogSuite) TestSimpleConfigurationWithWatch(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple_watch.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple_watch.toml", tempObjects)
reg := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
Tags: []string{"traefik.enable=true"},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "whoami.consul.localhost"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContainsOr("Hostname: whoami1"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoamiIP := s.getComposeServiceIP(c, "whoami1")
whoamiIP := s.getComposeServiceIP("whoami1")
reg.Check = &api.AgentServiceCheck{
CheckID: "some-ok-check",
TCP: whoamiIP + ":80",
@ -285,10 +276,10 @@ func (s *ConsulCatalogSuite) TestSimpleConfigurationWithWatch(c *check.C) {
}
err = s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContainsOr("Hostname: whoami1"))
c.Assert(err, checker.IsNil)
err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContainsOr("Hostname: whoami1"))
require.NoError(s.T(), err)
reg.Check = &api.AgentServiceCheck{
CheckID: "some-failing-check",
@ -299,16 +290,16 @@ func (s *ConsulCatalogSuite) TestSimpleConfigurationWithWatch(c *check.C) {
}
err = s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err = try.Request(req, 5*time.Second, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestRegisterServiceWithoutIP(c *check.C) {
func (s *ConsulCatalogSuite) TestRegisterServiceWithoutIP() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -317,8 +308,7 @@ func (s *ConsulCatalogSuite) TestRegisterServiceWithoutIP(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
reg := &api.AgentServiceRegistration{
ID: "whoami1",
@ -328,25 +318,21 @@ func (s *ConsulCatalogSuite) TestRegisterServiceWithoutIP(c *check.C) {
Address: "",
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/http/services", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("whoami@consulcatalog", "\"http://127.0.0.1:80\": \"UP\""))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestDefaultConsulService(c *check.C) {
func (s *ConsulCatalogSuite) TestDefaultConsulService() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -355,37 +341,32 @@ func (s *ConsulCatalogSuite) TestDefaultConsulService(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
reg := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "whoami.consul.localhost"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami1"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulServiceWithTCPLabels(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulServiceWithTCPLabels() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -394,8 +375,7 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithTCPLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
// Start a container with some tags
reg := &api.AgentServiceRegistration{
@ -407,32 +387,28 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithTCPLabels(c *check.C) {
"traefik.tcp.Services.Super.Loadbalancer.server.port=8080",
},
Port: 8080,
Address: s.getComposeServiceIP(c, "whoamitcp"),
Address: s.getComposeServiceIP("whoamitcp"),
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.BodyContains("HostSNI(`my.super.host`)"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
who, err := guessWho("127.0.0.1:8000", "my.super.host", true)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
c.Assert(who, checker.Contains, "whoamitcp")
assert.Contains(s.T(), who, "whoamitcp")
err = s.deregisterService("whoamitcp", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulServiceWithLabels(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulServiceWithLabels() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -441,8 +417,7 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
// Start a container with some tags
reg1 := &api.AgentServiceRegistration{
@ -452,11 +427,11 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithLabels(c *check.C) {
"traefik.http.Routers.Super.Rule=Host(`my.super.host`)",
},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg1, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start another container by replacing a '.' by a '-'
reg2 := &api.AgentServiceRegistration{
@ -466,40 +441,36 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithLabels(c *check.C) {
"traefik.http.Routers.SuperHost.Rule=Host(`my-super.host`)",
},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami2"),
Address: s.getComposeServiceIP("whoami2"),
}
err = s.registerService(reg2, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my-super.host"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami1"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami2"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami2", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestSameServiceIDOnDifferentConsulAgent(c *check.C) {
func (s *ConsulCatalogSuite) TestSameServiceIDOnDifferentConsulAgent() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -508,8 +479,7 @@ func (s *ConsulCatalogSuite) TestSameServiceIDOnDifferentConsulAgent(c *check.C)
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/default_not_exposed.toml", tempObjects)
// Start a container with some tags
tags := []string{
@ -523,50 +493,46 @@ func (s *ConsulCatalogSuite) TestSameServiceIDOnDifferentConsulAgent(c *check.C)
Name: "whoami",
Tags: tags,
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg1, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
reg2 := &api.AgentServiceRegistration{
ID: "whoami",
Name: "whoami",
Tags: tags,
Port: 80,
Address: s.getComposeServiceIP(c, "whoami2"),
Address: s.getComposeServiceIP("whoami2"),
}
err = s.registerService(reg2, true)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami1", "Hostname: whoami2"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/rawdata", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(200),
try.BodyContainsOr(s.getComposeServiceIP(c, "whoami1"), s.getComposeServiceIP(c, "whoami2")))
c.Assert(err, checker.IsNil)
try.BodyContainsOr(s.getComposeServiceIP("whoami1"), s.getComposeServiceIP("whoami2")))
require.NoError(s.T(), err)
err = s.deregisterService("whoami", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami", true)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulServiceWithOneMissingLabels(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulServiceWithOneMissingLabels() {
tempObjects := struct {
ConsulAddress string
DefaultRule string
@ -575,8 +541,7 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithOneMissingLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.consul.localhost`)",
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
// Start a container with some tags
reg := &api.AgentServiceRegistration{
@ -586,32 +551,28 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithOneMissingLabels(c *check.C) {
"traefik.random.value=my.super.host",
},
Port: 80,
Address: s.getComposeServiceIP(c, "whoami1"),
Address: s.getComposeServiceIP("whoami1"),
}
err := s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
// TODO validate: run on port 80
// Expected a 404 as we did not configure anything
err = try.Request(req, 1500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulServiceWithHealthCheck(c *check.C) {
whoamiIP := s.getComposeServiceIP(c, "whoami1")
func (s *ConsulCatalogSuite) TestConsulServiceWithHealthCheck() {
whoamiIP := s.getComposeServiceIP("whoami1")
tags := []string{
"traefik.enable=true",
"traefik.http.routers.router1.rule=Path(`/whoami`)",
@ -635,7 +596,7 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithHealthCheck(c *check.C) {
}
err := s.registerService(reg1, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
@ -643,22 +604,17 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithHealthCheck(c *check.C) {
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/simple.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8000/whoami", 2*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoami2IP := s.getComposeServiceIP(c, "whoami2")
whoami2IP := s.getComposeServiceIP("whoami2")
reg2 := &api.AgentServiceRegistration{
ID: "whoami2",
Name: "whoami",
@ -675,26 +631,26 @@ func (s *ConsulCatalogSuite) TestConsulServiceWithHealthCheck(c *check.C) {
}
err = s.registerService(reg2, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/whoami", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "whoami"
// TODO Need to wait for up to 10 seconds (for consul discovery or traefik to boot up?)
err = try.Request(req, 10*time.Second, try.StatusCodeIs(200), try.BodyContainsOr("Hostname: whoami2"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami2", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulConnect(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulConnect() {
// Wait for consul to fully initialize connect CA
err := s.waitForConnectCA()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
connectIP := s.getComposeServiceIP(c, "connect")
connectIP := s.getComposeServiceIP("connect")
reg := &api.AgentServiceRegistration{
ID: "uuid-api1",
Name: "uuid-api",
@ -712,9 +668,9 @@ func (s *ConsulCatalogSuite) TestConsulConnect(c *check.C) {
Address: connectIP,
}
err = s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoamiIP := s.getComposeServiceIP(c, "whoami1")
whoamiIP := s.getComposeServiceIP("whoami1")
regWhoami := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
@ -727,40 +683,35 @@ func (s *ConsulCatalogSuite) TestConsulConnect(c *check.C) {
Address: whoamiIP,
}
err = s.registerService(regWhoami, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
}{
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/connect.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/connect.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8000/", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8000/whoami", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("uuid-api1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulConnect_ByDefault(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulConnect_ByDefault() {
// Wait for consul to fully initialize connect CA
err := s.waitForConnectCA()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
connectIP := s.getComposeServiceIP(c, "connect")
connectIP := s.getComposeServiceIP("connect")
reg := &api.AgentServiceRegistration{
ID: "uuid-api1",
Name: "uuid-api",
@ -777,9 +728,9 @@ func (s *ConsulCatalogSuite) TestConsulConnect_ByDefault(c *check.C) {
Address: connectIP,
}
err = s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoamiIP := s.getComposeServiceIP(c, "whoami1")
whoamiIP := s.getComposeServiceIP("whoami1")
regWhoami := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami1",
@ -792,9 +743,9 @@ func (s *ConsulCatalogSuite) TestConsulConnect_ByDefault(c *check.C) {
Address: whoamiIP,
}
err = s.registerService(regWhoami, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoami2IP := s.getComposeServiceIP(c, "whoami2")
whoami2IP := s.getComposeServiceIP("whoami2")
regWhoami2 := &api.AgentServiceRegistration{
ID: "whoami2",
Name: "whoami2",
@ -808,45 +759,40 @@ func (s *ConsulCatalogSuite) TestConsulConnect_ByDefault(c *check.C) {
Address: whoami2IP,
}
err = s.registerService(regWhoami2, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
}{
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/connect_by_default.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/connect_by_default.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8000/", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8000/whoami", 10*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8000/whoami2", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("uuid-api1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami2", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulCatalogSuite) TestConsulConnect_NotAware(c *check.C) {
func (s *ConsulCatalogSuite) TestConsulConnect_NotAware() {
// Wait for consul to fully initialize connect CA
err := s.waitForConnectCA()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
connectIP := s.getComposeServiceIP(c, "connect")
connectIP := s.getComposeServiceIP("connect")
reg := &api.AgentServiceRegistration{
ID: "uuid-api1",
Name: "uuid-api",
@ -864,9 +810,9 @@ func (s *ConsulCatalogSuite) TestConsulConnect_NotAware(c *check.C) {
Address: connectIP,
}
err = s.registerService(reg, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
whoamiIP := s.getComposeServiceIP(c, "whoami1")
whoamiIP := s.getComposeServiceIP("whoami1")
regWhoami := &api.AgentServiceRegistration{
ID: "whoami1",
Name: "whoami",
@ -879,30 +825,25 @@ func (s *ConsulCatalogSuite) TestConsulConnect_NotAware(c *check.C) {
Address: whoamiIP,
}
err = s.registerService(regWhoami, false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
tempObjects := struct {
ConsulAddress string
}{
ConsulAddress: s.consulURL,
}
file := s.adaptFile(c, "fixtures/consul_catalog/connect_not_aware.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/consul_catalog/connect_not_aware.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8000/", 10*time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8000/whoami", 10*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("uuid-api1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = s.deregisterService("whoami1", false)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}


@ -10,16 +10,17 @@ import (
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/go-check/check"
"github.com/kvtools/consul"
"github.com/kvtools/valkeyrie"
"github.com/kvtools/valkeyrie/store"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/api"
checker "github.com/vdemeester/shakers"
)
// Consul test suites.
@ -29,18 +30,16 @@ type ConsulSuite struct {
consulURL string
}
func (s *ConsulSuite) resetStore(c *check.C) {
err := s.kvClient.DeleteTree(context.Background(), "traefik")
if err != nil && !errors.Is(err, store.ErrKeyNotFound) {
c.Fatal(err)
}
func TestConsulSuite(t *testing.T) {
suite.Run(t, new(ConsulSuite))
}
func (s *ConsulSuite) setupStore(c *check.C) {
s.createComposeProject(c, "consul")
s.composeUp(c)
func (s *ConsulSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("consul")
s.composeUp()
consulAddr := net.JoinHostPort(s.getComposeServiceIP(c, "consul"), "8500")
consulAddr := net.JoinHostPort(s.getComposeServiceIP("consul"), "8500")
s.consulURL = fmt.Sprintf("http://%s", consulAddr)
kv, err := valkeyrie.NewStore(
@ -51,21 +50,27 @@ func (s *ConsulSuite) setupStore(c *check.C) {
ConnectionTimeout: 10 * time.Second,
},
)
if err != nil {
c.Fatal("Cannot create store consul")
}
require.NoError(s.T(), err, "Cannot create store consul")
s.kvClient = kv
// wait for consul
err = try.Do(60*time.Second, try.KVExists(kv, "test"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ConsulSuite) TestSimpleConfiguration(c *check.C) {
s.setupStore(c)
func (s *ConsulSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulAddress string }{s.consulURL})
defer os.Remove(file)
func (s *ConsulSuite) TearDownTest() {
err := s.kvClient.DeleteTree(context.Background(), "traefik")
if err != nil && !errors.Is(err, store.ErrKeyNotFound) {
require.ErrorIs(s.T(), err, store.ErrKeyNotFound)
}
}
func (s *ConsulSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/consul/simple.toml", struct{ ConsulAddress string }{s.consulURL})
data := map[string]string{
"traefik/http/routers/Router0/entryPoints/0": "web",
@ -114,39 +119,35 @@ func (s *ConsulSuite) TestSimpleConfiguration(c *check.C) {
for k, v := range data {
err := s.kvClient.Put(context.Background(), k, []byte(v), nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
try.BodyContains(`"striper@consul":`, `"compressor@consul":`, `"srvcA@consul":`, `"srvcB@consul":`),
)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
resp, err := http.Get("http://127.0.0.1:8080/api/rawdata")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
var obtained api.RunTimeRepresentation
err = json.NewDecoder(resp.Body).Decode(&obtained)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
got, err := json.MarshalIndent(obtained, "", " ")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
expectedJSON := filepath.FromSlash("testdata/rawdata-consul.json")
if *updateExpected {
err = os.WriteFile(expectedJSON, got, 0o666)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
expected, err := os.ReadFile(expectedJSON)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if !bytes.Equal(expected, got) {
diff := difflib.UnifiedDiff{
@ -158,33 +159,27 @@ func (s *ConsulSuite) TestSimpleConfiguration(c *check.C) {
}
text, err := difflib.GetUnifiedDiffString(diff)
c.Assert(err, checker.IsNil)
c.Error(text)
require.NoError(s.T(), err, text)
}
}
func (s *ConsulSuite) assertWhoami(c *check.C, host string, expectedStatusCode int) {
func (s *ConsulSuite) assertWhoami(host string, expectedStatusCode int) {
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
if err != nil {
c.Fatal(err)
}
require.NoError(s.T(), err)
req.Host = host
resp, err := try.ResponseUntilStatusCode(req, 15*time.Second, expectedStatusCode)
require.NoError(s.T(), err)
resp.Body.Close()
c.Assert(err, checker.IsNil)
}
func (s *ConsulSuite) TestDeleteRootKey(c *check.C) {
func (s *ConsulSuite) TestDeleteRootKey() {
// This test case reproduce the issue: https://github.com/traefik/traefik/issues/8092
s.setupStore(c)
s.resetStore(c)
file := s.adaptFile(c, "fixtures/consul/simple.toml", struct{ ConsulAddress string }{s.consulURL})
defer os.Remove(file)
file := s.adaptFile("fixtures/consul/simple.toml", struct{ ConsulAddress string }{s.consulURL})
ctx := context.Background()
svcaddr := net.JoinHostPort(s.getComposeServiceIP(c, "whoami"), "80")
svcaddr := net.JoinHostPort(s.getComposeServiceIP("whoami"), "80")
data := map[string]string{
"traefik/http/routers/Router0/entryPoints/0": "web",
@ -201,32 +196,28 @@ func (s *ConsulSuite) TestDeleteRootKey(c *check.C) {
for k, v := range data {
err := s.kvClient.Put(ctx, k, []byte(v), nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
try.BodyContains(`"Router0@consul":`, `"Router1@consul":`, `"simplesvc0@consul":`, `"simplesvc1@consul":`),
)
c.Assert(err, checker.IsNil)
s.assertWhoami(c, "kv1.localhost", http.StatusOK)
s.assertWhoami(c, "kv2.localhost", http.StatusOK)
require.NoError(s.T(), err)
s.assertWhoami("kv1.localhost", http.StatusOK)
s.assertWhoami("kv2.localhost", http.StatusOK)
// delete router1
err = s.kvClient.DeleteTree(ctx, "traefik/http/routers/Router1")
c.Assert(err, checker.IsNil)
s.assertWhoami(c, "kv1.localhost", http.StatusOK)
s.assertWhoami(c, "kv2.localhost", http.StatusNotFound)
require.NoError(s.T(), err)
s.assertWhoami("kv1.localhost", http.StatusOK)
s.assertWhoami("kv2.localhost", http.StatusNotFound)
// delete simple services and router0
err = s.kvClient.DeleteTree(ctx, "traefik")
c.Assert(err, checker.IsNil)
s.assertWhoami(c, "kv1.localhost", http.StatusNotFound)
s.assertWhoami(c, "kv2.localhost", http.StatusNotFound)
require.NoError(s.T(), err)
s.assertWhoami("kv1.localhost", http.StatusNotFound)
s.assertWhoami("kv2.localhost", http.StatusNotFound)
}


@ -3,15 +3,16 @@ package integration
import (
"encoding/json"
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/api"
"github.com/traefik/traefik/v3/pkg/testhelpers"
checker "github.com/vdemeester/shakers"
)
// Docker tests suite.
@ -19,12 +20,21 @@ type DockerComposeSuite struct {
BaseSuite
}
func (s *DockerComposeSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "minimal")
s.composeUp(c)
func TestDockerComposeSuite(t *testing.T) {
suite.Run(t, new(DockerComposeSuite))
}
func (s *DockerComposeSuite) TestComposeScale(c *check.C) {
func (s *DockerComposeSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("minimal")
s.composeUp()
}
func (s *DockerComposeSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *DockerComposeSuite) TestComposeScale() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -32,41 +42,36 @@ func (s *DockerComposeSuite) TestComposeScale(c *check.C) {
DockerHost: s.getDockerHost(),
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/minimal.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/minimal.toml", tempObjects)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req := testhelpers.MustNewRequest(http.MethodGet, "http://127.0.0.1:8000/whoami", nil)
req.Host = "my.super.host"
_, err = try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
_, err := try.ResponseUntilStatusCode(req, 5*time.Second, http.StatusOK)
require.NoError(s.T(), err)
resp, err := http.Get("http://127.0.0.1:8080/api/rawdata")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
defer resp.Body.Close()
var rtconf api.RunTimeRepresentation
err = json.NewDecoder(resp.Body).Decode(&rtconf)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// check that we have only three routers (the one from this test + 2 unrelated internal ones)
c.Assert(rtconf.Routers, checker.HasLen, 3)
assert.Len(s.T(), rtconf.Routers, 3)
// check that we have only one service (not counting the internal ones) with n servers
services := rtconf.Services
c.Assert(services, checker.HasLen, 4)
assert.Len(s.T(), services, 4)
for name, service := range services {
if strings.HasSuffix(name, "@internal") {
continue
}
c.Assert(name, checker.Equals, "whoami1-"+s.composeProject.Name+"@docker")
c.Assert(service.LoadBalancer.Servers, checker.HasLen, 2)
assert.Equal(s.T(), "service-mini@docker", name)
assert.Len(s.T(), service.LoadBalancer.Servers, 2)
// We could break here, but we don't, just to keep ourselves honest.
}
}


@ -2,15 +2,15 @@ package integration
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
// Docker tests suite.
@ -18,15 +18,24 @@ type DockerSuite struct {
BaseSuite
}
func (s *DockerSuite) SetUpTest(c *check.C) {
s.createComposeProject(c, "docker")
func TestDockerSuite(t *testing.T) {
suite.Run(t, new(DockerSuite))
}
func (s *DockerSuite) TearDownTest(c *check.C) {
s.composeDown(c)
func (s *DockerSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("docker")
}
func (s *DockerSuite) TestSimpleConfiguration(c *check.C) {
func (s *DockerSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *DockerSuite) TearDownTest() {
s.composeStop("simple", "withtcplabels", "withlabels1", "withlabels2", "withonelabelmissing", "powpow")
}
func (s *DockerSuite) TestSimpleConfiguration() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -35,24 +44,18 @@ func (s *DockerSuite) TestSimpleConfiguration(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c)
s.composeUp()
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
}
func (s *DockerSuite) TestDefaultDockerContainers(c *check.C) {
func (s *DockerSuite) TestDefaultDockerContainers() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -61,37 +64,30 @@ func (s *DockerSuite) TestDefaultDockerContainers(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c, "simple")
s.composeUp("simple")
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
req.Host = fmt.Sprintf("simple-%s.docker.localhost", s.composeProject.Name)
require.NoError(s.T(), err)
req.Host = "simple.docker.localhost"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
resp, err := try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
resp, err := try.ResponseUntilStatusCode(req, 3*time.Second, http.StatusOK)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
var version map[string]interface{}
c.Assert(json.Unmarshal(body, &version), checker.IsNil)
c.Assert(version["Version"], checker.Equals, "swarm/1.0.0")
assert.NoError(s.T(), json.Unmarshal(body, &version))
assert.Equal(s.T(), "swarm/1.0.0", version["Version"])
}
func (s *DockerSuite) TestDockerContainersWithTCPLabels(c *check.C) {
func (s *DockerSuite) TestDockerContainersWithTCPLabels() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -100,29 +96,23 @@ func (s *DockerSuite) TestDockerContainersWithTCPLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c, "withtcplabels")
s.composeUp("withtcplabels")
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
s.traefikCmd(withConfigFile(file))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.BodyContains("HostSNI(`my.super.host`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.BodyContains("HostSNI(`my.super.host`)"))
require.NoError(s.T(), err)
who, err := guessWho("127.0.0.1:8000", "my.super.host", true)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
c.Assert(who, checker.Contains, "my.super.host")
assert.Contains(s.T(), who, "my.super.host")
}
func (s *DockerSuite) TestDockerContainersWithLabels(c *check.C) {
func (s *DockerSuite) TestDockerContainersWithLabels() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -131,44 +121,37 @@ func (s *DockerSuite) TestDockerContainersWithLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c, "withlabels1", "withlabels2")
s.composeUp("withlabels1", "withlabels2")
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my-super.host"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
_, err = try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
_, err = try.ResponseUntilStatusCode(req, 3*time.Second, http.StatusOK)
require.NoError(s.T(), err)
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
resp, err := try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
resp, err := try.ResponseUntilStatusCode(req, 3*time.Second, http.StatusOK)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
var version map[string]interface{}
c.Assert(json.Unmarshal(body, &version), checker.IsNil)
c.Assert(version["Version"], checker.Equals, "swarm/1.0.0")
assert.NoError(s.T(), json.Unmarshal(body, &version))
assert.Equal(s.T(), "swarm/1.0.0", version["Version"])
}
func (s *DockerSuite) TestDockerContainersWithOneMissingLabels(c *check.C) {
func (s *DockerSuite) TestDockerContainersWithOneMissingLabels() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -177,31 +160,23 @@ func (s *DockerSuite) TestDockerContainersWithOneMissingLabels(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c, "withonelabelmissing")
s.composeUp("withonelabelmissing")
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
// TODO validate: run on port 80
// Expected a 404 as we did not configure anything
err = try.Request(req, 1500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err = try.Request(req, 3*time.Second, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
}
func (s *DockerSuite) TestRestartDockerContainers(c *check.C) {
func (s *DockerSuite) TestRestartDockerContainers() {
tempObjects := struct {
DockerHost string
DefaultRule string
@ -210,46 +185,40 @@ func (s *DockerSuite) TestRestartDockerContainers(c *check.C) {
DefaultRule: "Host(`{{ normalize .Name }}.docker.localhost`)",
}
file := s.adaptFile(c, "fixtures/docker/simple.toml", tempObjects)
defer os.Remove(file)
file := s.adaptFile("fixtures/docker/simple.toml", tempObjects)
s.composeUp(c, "powpow")
s.composeUp("powpow")
// Start traefik
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/version", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "my.super.host"
// TODO Need to wait 500 more milliseconds (for swarm or traefik to boot up?)
resp, err := try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
var version map[string]interface{}
c.Assert(json.Unmarshal(body, &version), checker.IsNil)
c.Assert(version["Version"], checker.Equals, "swarm/1.0.0")
assert.NoError(s.T(), json.Unmarshal(body, &version))
assert.Equal(s.T(), "swarm/1.0.0", version["Version"])
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("powpow"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
s.composeStop(c, "powpow")
s.composeStop("powpow")
time.Sleep(5 * time.Second)
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 10*time.Second, try.BodyContains("powpow"))
c.Assert(err, checker.NotNil)
assert.Error(s.T(), err)
s.composeUp(c, "powpow")
s.composeUp("powpow")
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("powpow"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}


@ -3,12 +3,12 @@ package integration
import (
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
// ErrorPagesSuite test suites.
@ -18,83 +18,78 @@ type ErrorPagesSuite struct {
BackendIP string
}
func (s *ErrorPagesSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "error_pages")
s.composeUp(c)
s.ErrorPageIP = s.getComposeServiceIP(c, "nginx2")
s.BackendIP = s.getComposeServiceIP(c, "nginx1")
func TestErrorPagesSuite(t *testing.T) {
suite.Run(t, new(ErrorPagesSuite))
}
func (s *ErrorPagesSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/error_pages/simple.toml", struct {
func (s *ErrorPagesSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("error_pages")
s.composeUp()
s.ErrorPageIP = s.getComposeServiceIP("nginx2")
s.BackendIP = s.getComposeServiceIP("nginx1")
}
func (s *ErrorPagesSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *ErrorPagesSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/error_pages/simple.toml", struct {
Server1 string
Server2 string
}{"http://" + s.BackendIP + ":80", s.ErrorPageIP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
frontendReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendReq.Host = "test.local"
err = try.Request(frontendReq, 2*time.Second, try.BodyContains("nginx"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ErrorPagesSuite) TestErrorPage(c *check.C) {
func (s *ErrorPagesSuite) TestErrorPage() {
// error.toml contains a mis-configuration of the backend host
file := s.adaptFile(c, "fixtures/error_pages/error.toml", struct {
file := s.adaptFile("fixtures/error_pages/error.toml", struct {
Server1 string
Server2 string
}{s.BackendIP, s.ErrorPageIP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
frontendReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendReq.Host = "test.local"
err = try.Request(frontendReq, 2*time.Second, try.BodyContains("An error occurred."))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *ErrorPagesSuite) TestErrorPageFlush(c *check.C) {
func (s *ErrorPagesSuite) TestErrorPageFlush() {
srv := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
rw.Header().Add("Transfer-Encoding", "chunked")
rw.WriteHeader(http.StatusInternalServerError)
_, _ = rw.Write([]byte("KO"))
}))
file := s.adaptFile(c, "fixtures/error_pages/simple.toml", struct {
file := s.adaptFile("fixtures/error_pages/simple.toml", struct {
Server1 string
Server2 string
}{srv.URL, s.ErrorPageIP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
frontendReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendReq.Host = "test.local"
err = try.Request(frontendReq, 2*time.Second,
try.BodyContains("An error occurred."),
try.HasHeaderValue("Content-Type", "text/html", true),
)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}


@ -8,16 +8,17 @@ import (
"net/http"
"os"
"path/filepath"
"testing"
"time"
"github.com/go-check/check"
"github.com/kvtools/etcdv3"
"github.com/kvtools/valkeyrie"
"github.com/kvtools/valkeyrie/store"
"github.com/pmezard/go-difflib/difflib"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/api"
checker "github.com/vdemeester/shakers"
)
// etcd test suites.
@ -27,12 +28,18 @@ type EtcdSuite struct {
etcdAddr string
}
func (s *EtcdSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "etcd")
s.composeUp(c)
func TestEtcdSuite(t *testing.T) {
suite.Run(t, new(EtcdSuite))
}
func (s *EtcdSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("etcd")
s.composeUp()
var err error
s.etcdAddr = net.JoinHostPort(s.getComposeServiceIP(c, "etcd"), "2379")
s.etcdAddr = net.JoinHostPort(s.getComposeServiceIP("etcd"), "2379")
s.kvClient, err = valkeyrie.NewStore(
context.Background(),
etcdv3.StoreName,
@ -41,18 +48,19 @@ func (s *EtcdSuite) SetUpSuite(c *check.C) {
ConnectionTimeout: 10 * time.Second,
},
)
if err != nil {
c.Fatal("Cannot create store etcd")
}
require.NoError(s.T(), err)
// wait for etcd
err = try.Do(60*time.Second, try.KVExists(s.kvClient, "test"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/etcd/simple.toml", struct{ EtcdAddress string }{s.etcdAddr})
defer os.Remove(file)
func (s *EtcdSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *EtcdSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/etcd/simple.toml", struct{ EtcdAddress string }{s.etcdAddr})
data := map[string]string{
"traefik/http/routers/Router0/entryPoints/0": "web",
@ -101,39 +109,35 @@ func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
for k, v := range data {
err := s.kvClient.Put(context.Background(), k, []byte(v), nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 2*time.Second,
try.BodyContains(`"striper@etcd":`, `"compressor@etcd":`, `"srvcA@etcd":`, `"srvcB@etcd":`),
)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
resp, err := http.Get("http://127.0.0.1:8080/api/rawdata")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
var obtained api.RunTimeRepresentation
err = json.NewDecoder(resp.Body).Decode(&obtained)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
got, err := json.MarshalIndent(obtained, "", " ")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
expectedJSON := filepath.FromSlash("testdata/rawdata-etcd.json")
if *updateExpected {
err = os.WriteFile(expectedJSON, got, 0o666)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
expected, err := os.ReadFile(expectedJSON)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if !bytes.Equal(expected, got) {
diff := difflib.UnifiedDiff{
@ -145,7 +149,6 @@ func (s *EtcdSuite) TestSimpleConfiguration(c *check.C) {
}
text, err := difflib.GetUnifiedDiffString(diff)
c.Assert(err, checker.IsNil)
c.Error(text)
require.NoError(s.T(), err, text)
}
}
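
The etcd test seeds the dynamic configuration straight into the KV store before asserting on the API raw data. A standalone sketch of that seeding step, using the kvtools/valkeyrie API the test already relies on; the endpoint address and the router/service key names other than the first one shown above are illustrative:

    package main

    import (
        "context"
        "log"
        "time"

        "github.com/kvtools/etcdv3"
        "github.com/kvtools/valkeyrie"
    )

    func main() {
        ctx := context.Background()

        // The integration test derives this address from its compose project.
        kv, err := valkeyrie.NewStore(ctx, etcdv3.StoreName, []string{"127.0.0.1:2379"}, &etcdv3.Config{
            ConnectionTimeout: 10 * time.Second,
        })
        if err != nil {
            log.Fatalf("cannot create etcd store: %v", err)
        }

        // Keys follow the traefik/... layout that the KV provider watches.
        data := map[string]string{
            "traefik/http/routers/Router0/entryPoints/0":                 "web",
            "traefik/http/routers/Router0/rule":                          "Host(`kv.localhost`)",
            "traefik/http/routers/Router0/service":                       "simplesvc",
            "traefik/http/services/simplesvc/loadBalancer/servers/0/url": "http://127.0.0.1:8081",
        }
        for k, v := range data {
            if err := kv.Put(ctx, k, []byte(v), nil); err != nil {
                log.Fatalf("put %s: %v", k, err)
            }
        }
    }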

View file

@ -2,61 +2,58 @@ package integration
import (
"net/http"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
// File tests suite.
type FileSuite struct{ BaseSuite }
func (s *FileSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "file")
s.composeUp(c)
func TestFileSuite(t *testing.T) {
suite.Run(t, new(FileSuite))
}
func (s *FileSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/file/simple.toml", struct{}{})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *FileSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("file")
s.composeUp()
}
func (s *FileSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *FileSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/file/simple.toml", struct{}{})
s.traefikCmd(withConfigFile(file))
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
}
// #56 regression test, make sure it does not fail?
func (s *FileSuite) TestSimpleConfigurationNoPanic(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/file/56-simple-panic.toml"))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *FileSuite) TestSimpleConfigurationNoPanic() {
s.traefikCmd(withConfigFile("fixtures/file/56-simple-panic.toml"))
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
}
func (s *FileSuite) TestDirectoryConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/file/directory.toml"))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *FileSuite) TestDirectoryConfiguration() {
s.traefikCmd(withConfigFile("fixtures/file/directory.toml"))
// Expected a 404 as we did not configure anything at /test
err = try.GetRequest("http://127.0.0.1:8000/test", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/test", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
// Expected a 502 as there is no backend server
err = try.GetRequest("http://127.0.0.1:8000/test2", 1000*time.Millisecond, try.StatusCodeIs(http.StatusBadGateway))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}

File diff suppressed because it is too large

View file

@ -1164,6 +1164,36 @@ spec:
description: 'IPAllowList holds the IP allowlist middleware configuration.
This middleware accepts / refuses requests based on the client IP.
More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
by Traefik to determine the client IP. More info: https://doc.traefik.io/traefik/v3.0/middlewares/http/ipallowlist/#ipstrategy'
properties:
depth:
description: Depth tells Traefik to use the X-Forwarded-For
header and take the IP located at the depth position (starting
from the right).
type: integer
excludedIPs:
description: ExcludedIPs configures Traefik to scan the X-Forwarded-For
header and select the first IP not in the list.
items:
type: string
type: array
type: object
rejectStatusCode:
description: RejectStatusCode defines the HTTP status code used
for refused requests. If not set, the default is 403 (Forbidden).
type: integer
sourceRange:
description: SourceRange defines the set of allowed IPs (or ranges
of allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
ipWhiteList:
description: 'Deprecated: please use IPAllowList instead.'
properties:
ipStrategy:
description: 'IPStrategy holds the IP strategy configuration used
@ -1524,6 +1554,17 @@ spec:
type: string
type: array
type: object
ipWhiteList:
description: 'IPWhiteList defines the IPWhiteList middleware configuration.
Deprecated: please use IPAllowList instead.'
properties:
sourceRange:
description: SourceRange defines the allowed IPs (or ranges of
allowed IPs by using CIDR notation).
items:
type: string
type: array
type: object
type: object
required:
- metadata

View file

@ -1,6 +1,6 @@
---
kind: GatewayClass
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: my-gateway-class
spec:
@ -8,7 +8,7 @@ spec:
---
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: my-gateway
namespace: default
@ -24,7 +24,7 @@ spec:
---
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: my-tcp-gateway
namespace: default
@ -51,7 +51,7 @@ data:
---
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: my-tls-gateway
namespace: default
@ -83,7 +83,7 @@ spec:
---
kind: Gateway
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: my-https-gateway
namespace: default
@ -104,7 +104,7 @@ spec:
---
kind: HTTPRoute
apiVersion: gateway.networking.k8s.io/v1alpha2
apiVersion: gateway.networking.k8s.io/v1
metadata:
name: http-app-1
namespace: default

View file

@ -7,10 +7,14 @@
noColor = true
[entryPoints]
[entryPoints.web]
[entryPoints.trust]
address = ":8000"
[entryPoints.web.proxyProtocol]
trustedIPs = ["{{.HaproxyIP}}"]
[entryPoints.trust.proxyProtocol]
trustedIPs = ["127.0.0.1"]
[entryPoints.nottrust]
address = ":9000"
[entryPoints.nottrust.proxyProtocol]
trustedIPs = ["1.2.3.4"]
[api]
insecure = true
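
This fixture now exposes two entrypoints: one that trusts PROXY protocol headers from 127.0.0.1 and one that only trusts 1.2.3.4, so spoofed client addresses are honoured on the first and ignored on the second. A hedged sketch of handcrafting a PROXY protocol v1 preamble against the trusted entrypoint; the request path and Host value are illustrative and depend on routers defined elsewhere in the test:

    package main

    import (
        "bufio"
        "fmt"
        "log"
        "net"
    )

    func main() {
        // The "trust" entrypoint from the fixture listens on :8000 and trusts 127.0.0.1.
        conn, err := net.Dial("tcp", "127.0.0.1:8000")
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        // PROXY protocol v1 preamble: pretend the original client was 1.2.3.4:56789.
        fmt.Fprint(conn, "PROXY TCP4 1.2.3.4 127.0.0.1 56789 8000\r\n")

        // Then speak plain HTTP on the same connection.
        fmt.Fprint(conn, "GET /whoami HTTP/1.1\r\nHost: test.localhost\r\nConnection: close\r\n\r\n")

        status, err := bufio.NewReader(conn).ReadString('\n')
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("status line: %s", status)
    }

If the same preamble is sent to the untrusted entrypoint on :9000, Traefik should keep the real peer address instead of the spoofed one.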

View file

@ -1,33 +0,0 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
noColor = true
[entryPoints]
[entryPoints.web]
address = ":8000"
[entryPoints.web.proxyProtocol]
trustedIPs = ["1.2.3.4"]
[api]
insecure = true
[providers.file]
filename = "{{ .SelfFilename }}"
## dynamic configuration ##
[http.routers]
[http.routers.router1]
service = "service1"
rule = "Path(`/whoami`)"
[http.services]
[http.services.service1]
[http.services.service1.loadBalancer]
[[http.services.service1.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}"

View file

@ -0,0 +1,19 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
[entryPoints.web]
address = ":8000"
[api]
insecure = true
[providers.redis]
rootKey = "traefik"
endpoints = ["{{ .RedisAddress }}"]
[providers.redis.sentinel]
masterName = "mymaster"

View file

@ -0,0 +1,18 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
[entryPoints]
[entryPoints.web]
address = ":8000"
[entryPoints.web.ForwardedHeaders]
insecure = true
[api]
insecure = true
[providers]
[providers.docker]

View file

@ -4,6 +4,8 @@
[log]
level = "DEBUG"
noColor = true
[entryPoints]
[entryPoints.tcp]

View file

@ -0,0 +1,52 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
noColor = true
[entryPoints]
[entryPoints.tcp]
address = ":8093"
[api]
insecure = true
[providers.file]
filename = "{{ .SelfFilename }}"
## dynamic configuration ##
[tcp]
[tcp.routers]
[tcp.routers.to-whoami-a]
entryPoints = ["tcp"]
rule = "HostSNI(`whoami-a.test`)"
service = "whoami-a"
middlewares = ["blocking-ipwhitelist"]
[tcp.routers.to-whoami-a.tls]
passthrough = true
[tcp.routers.to-whoami-b]
entryPoints = ["tcp"]
rule = "HostSNI(`whoami-b.test`)"
service = "whoami-b"
middlewares = ["allowing-ipwhitelist"]
[tcp.routers.to-whoami-b.tls]
passthrough = true
[tcp.services]
[tcp.services.whoami-a.loadBalancer]
[[tcp.services.whoami-a.loadBalancer.servers]]
address = "{{ .WhoamiA }}"
[tcp.services.whoami-b.loadBalancer]
[[tcp.services.whoami-b.loadBalancer.servers]]
address = "{{ .WhoamiB }}"
[tcp.middlewares]
[tcp.middlewares.allowing-ipwhitelist.ipWhiteList]
sourceRange = ["127.0.0.1/32"]
[tcp.middlewares.blocking-ipwhitelist.ipWhiteList]
sourceRange = ["127.127.127.127/32"]
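
This new fixture routes raw TLS connections by SNI with passthrough enabled and applies TCP ipWhiteList middlewares, so whoami-b.test should be reachable from 127.0.0.1 while whoami-a.test is blocked. A rough way to poke at it with crypto/tls, assuming the backends terminate TLS themselves (which passthrough requires) and use test certificates:

    package main

    import (
        "crypto/tls"
        "log"
    )

    func main() {
        // The fixture's "tcp" entrypoint listens on :8093.
        conf := &tls.Config{
            // ServerName drives the HostSNI(...) rule; swap in "whoami-a.test"
            // to hit the route whose allow list should reject the connection.
            ServerName:         "whoami-b.test",
            InsecureSkipVerify: true, // self-signed test certificates only
        }

        conn, err := tls.Dial("tcp", "127.0.0.1:8093", conf)
        if err != nil {
            log.Fatalf("handshake failed: %v", err)
        }
        defer conn.Close()

        log.Printf("TLS handshake for %s succeeded", conf.ServerName)
    }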

View file

@ -0,0 +1,21 @@
receivers:
otlp:
protocols:
grpc:
http:
exporters:
otlp/tempo:
endpoint: http://tempo:4317
tls:
insecure: true
service:
telemetry:
logs:
level: "error"
extensions: [ health_check ]
pipelines:
traces:
receivers: [otlp]
exporters: [otlp/tempo]
extensions:
health_check:

View file

@ -1,70 +0,0 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
noColor = true
[api]
insecure = true
[entryPoints]
[entryPoints.web]
address = ":8000"
[tracing]
servicename = "tracing"
[tracing.jaeger]
samplingType = "const"
samplingParam = 1.0
samplingServerURL = "http://{{.IP}}:5778/sampling"
localAgentHostPort = "{{.IP}}:6831"
traceContextHeaderName = "{{.TraceContextHeaderName}}"
[providers.file]
filename = "{{ .SelfFilename }}"
## dynamic configuration ##
[http.routers]
[http.routers.router1]
Service = "service1"
Middlewares = ["retry", "ratelimit-1"]
Rule = "Path(`/ratelimit`)"
[http.routers.router2]
Service = "service2"
Middlewares = ["retry"]
Rule = "Path(`/retry`)"
[http.routers.router3]
Service = "service3"
Middlewares = ["retry", "basic-auth"]
Rule = "Path(`/auth`)"
[http.middlewares]
[http.middlewares.retry.retry]
attempts = 3
[http.middlewares.basic-auth.basicAuth]
users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"]
[http.middlewares.ratelimit-1.rateLimit]
average = 1
burst = 2
[http.services]
[http.services.service1]
[http.services.service1.loadBalancer]
passHostHeader = true
[[http.services.service1.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"
[http.services.service2]
[http.services.service2.loadBalancer]
passHostHeader = true
[[http.services.service2.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"
[http.services.service3]
[http.services.service3.loadBalancer]
passHostHeader = true
[[http.services.service3.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"

View file

@ -15,12 +15,15 @@
[tracing]
servicename = "tracing"
[tracing.jaeger]
samplingType = "const"
samplingParam = 1.0
samplingServerURL = "http://{{.IP}}:5778/sampling"
[tracing.jaeger.collector]
endpoint = "http://{{.IP}}:14268/api/traces?format=jaeger.thrift"
sampleRate = 1.0
{{if .IsHTTP}}
[tracing.otlp.http]
endpoint = "http://{{.IP}}:4318"
{{else}}
[tracing.otlp.grpc]
endpoint = "{{.IP}}:4317"
insecure = true
{{end}}
[providers.file]
filename = "{{ .SelfFilename }}"
@ -28,6 +31,10 @@
## dynamic configuration ##
[http.routers]
[http.routers.router0]
Service = "service0"
Middlewares = []
Rule = "Path(`/basic`)"
[http.routers.router1]
Service = "service1"
Middlewares = ["retry", "ratelimit-1"]
@ -50,8 +57,13 @@
average = 1
burst = 2
[http.services]
[http.services.service0]
[http.services.service0.loadBalancer]
passHostHeader = true
[[http.services.service0.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"
[http.services.service1]
[http.services.service1.loadBalancer]
passHostHeader = true
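
The Jaeger-specific sampling settings above are replaced by an OTLP exporter block, toggling between HTTP (:4318) and gRPC (:4317) endpoints that match the collector added in conf/otelcol.yaml. Outside of Traefik, a minimal OpenTelemetry Go client pointed at the same HTTP endpoint looks roughly like this; the endpoint address and tracer name are assumptions, and this is only a smoke-test span emitter, not Traefik's internal tracing wiring:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        ctx := context.Background()

        // Same target as the fixture's [tracing.otlp.http] endpoint.
        exp, err := otlptracehttp.New(ctx,
            otlptracehttp.WithEndpoint("127.0.0.1:4318"),
            otlptracehttp.WithInsecure(),
        )
        if err != nil {
            log.Fatal(err)
        }

        tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
        defer func() { _ = tp.Shutdown(ctx) }() // flushes the batched span
        otel.SetTracerProvider(tp)

        _, span := tp.Tracer("smoke-test").Start(ctx, "hello")
        span.End()
    }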

View file

@ -1,66 +0,0 @@
[global]
checkNewVersion = false
sendAnonymousUsage = false
[log]
level = "DEBUG"
noColor = true
[api]
insecure = true
[entryPoints]
[entryPoints.web]
address = ":8000"
[tracing]
servicename = "tracing"
[tracing.zipkin]
httpEndpoint = "http://{{.IP}}:9411/api/v2/spans"
[providers.file]
filename = "{{ .SelfFilename }}"
## dynamic configuration ##
[http.routers]
[http.routers.router1]
service = "service1"
middlewares = ["retry", "ratelimit-1"]
rule = "Path(`/ratelimit`)"
[http.routers.router2]
service = "service2"
middlewares = ["retry"]
rule = "Path(`/retry`)"
[http.routers.router3]
service = "service3"
middlewares = ["retry", "basic-auth"]
rule = "Path(`/auth`)"
[http.middlewares]
[http.middlewares.retry.retry]
attempts = 3
[http.middlewares.basic-auth.basicAuth]
users = ["test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/", "test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0"]
[http.middlewares.ratelimit-1.rateLimit]
average = 1
burst = 2
[http.services]
[http.services.service1]
[http.services.service1.loadBalancer]
passHostHeader = true
[[http.services.service1.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"
[http.services.service2]
[http.services.service2.loadBalancer]
passHostHeader = true
[[http.services.service2.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"
[http.services.service3]
[http.services.service3.loadBalancer]
passHostHeader = true
[[http.services.service3.loadBalancer.servers]]
url = "http://{{.WhoamiIP}}:{{.WhoamiPort}}"

View file

@ -0,0 +1,56 @@
server:
http_listen_port: 3200
query_frontend:
search:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
trace_by_id:
duration_slo: 5s
distributor:
receivers:
otlp:
protocols:
grpc:
ingester:
lifecycler:
address: 127.0.0.1
ring:
kvstore:
store: inmemory
replication_factor: 1
final_sleep: 0s
trace_idle_period: 1s
max_block_duration: 1m
complete_block_timeout: 5s
flush_check_period: 1s
compactor:
compaction:
block_retention: 1h
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /tmp/tempo/generator/wal
remote_write:
- url: http://prometheus:9090/api/v1/write
send_exemplars: true
storage:
trace:
backend: local # backend configuration to use
wal:
path: /tmp/tempo/wal # where to store the wal locally
local:
path: /tmp/tempo/blocks
overrides:
defaults:
metrics_generator:
processors: [service-graphs, span-metrics] # enables metrics generator

View file

@ -8,10 +8,12 @@ import (
"math/rand"
"net"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/helloworld"
"github.com/traefik/traefik/v3/integration/try"
"google.golang.org/grpc"
@ -29,16 +31,20 @@ const randCharset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567
// GRPCSuite tests suite.
type GRPCSuite struct{ BaseSuite }
func TestGRPCSuite(t *testing.T) {
suite.Run(t, new(GRPCSuite))
}
type myserver struct {
stopStreamExample chan bool
}
func (s *GRPCSuite) SetUpSuite(c *check.C) {
func (s *GRPCSuite) SetupSuite() {
var err error
LocalhostCert, err = os.ReadFile("./resources/tls/local.cert")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
LocalhostKey, err = os.ReadFile("./resources/tls/local.key")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}
func (s *myserver) SayHello(ctx context.Context, in *helloworld.HelloRequest) (*helloworld.HelloReply, error) {
@ -137,19 +143,18 @@ func callStreamExampleClientGRPC() (helloworld.Greeter_StreamExampleClient, func
return t, closer, nil
}
func (s *GRPCSuite) TestGRPC(c *check.C) {
func (s *GRPCSuite) TestGRPC() {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := startGRPCServer(lis, &myserver{})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config.toml", struct {
file := s.adaptFile("fixtures/grpc/config.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -159,79 +164,65 @@ func (s *GRPCSuite) TestGRPC(c *check.C) {
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var response string
err = try.Do(1*time.Second, func() error {
response, err = callHelloClientGRPC("World", true)
return err
})
c.Assert(err, check.IsNil)
c.Assert(response, check.Equals, "Hello World")
assert.NoError(s.T(), err)
assert.Equal(s.T(), "Hello World", response)
}
func (s *GRPCSuite) TestGRPCh2c(c *check.C) {
func (s *GRPCSuite) TestGRPCh2c() {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := starth2cGRPCServer(lis, &myserver{})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config_h2c.toml", struct {
file := s.adaptFile("fixtures/grpc/config_h2c.toml", struct {
GRPCServerPort string
}{
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var response string
err = try.Do(1*time.Second, func() error {
response, err = callHelloClientGRPC("World", false)
return err
})
c.Assert(err, check.IsNil)
c.Assert(response, check.Equals, "Hello World")
assert.NoError(s.T(), err)
assert.Equal(s.T(), "Hello World", response)
}
func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
func (s *GRPCSuite) TestGRPCh2cTermination() {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := starth2cGRPCServer(lis, &myserver{})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config_h2c_termination.toml", struct {
file := s.adaptFile("fixtures/grpc/config_h2c_termination.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -241,40 +232,33 @@ func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var response string
err = try.Do(1*time.Second, func() error {
response, err = callHelloClientGRPC("World", true)
return err
})
c.Assert(err, check.IsNil)
c.Assert(response, check.Equals, "Hello World")
assert.NoError(s.T(), err)
assert.Equal(s.T(), "Hello World", response)
}
func (s *GRPCSuite) TestGRPCInsecure(c *check.C) {
func (s *GRPCSuite) TestGRPCInsecure() {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := startGRPCServer(lis, &myserver{})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config_insecure.toml", struct {
file := s.adaptFile("fixtures/grpc/config_insecure.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -284,44 +268,37 @@ func (s *GRPCSuite) TestGRPCInsecure(c *check.C) {
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var response string
err = try.Do(1*time.Second, func() error {
response, err = callHelloClientGRPC("World", true)
return err
})
c.Assert(err, check.IsNil)
c.Assert(response, check.Equals, "Hello World")
assert.NoError(s.T(), err)
assert.Equal(s.T(), "Hello World", response)
}
func (s *GRPCSuite) TestGRPCBuffer(c *check.C) {
func (s *GRPCSuite) TestGRPCBuffer() {
stopStreamExample := make(chan bool)
defer func() { stopStreamExample <- true }()
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := startGRPCServer(lis, &myserver{
stopStreamExample: stopStreamExample,
})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config.toml", struct {
file := s.adaptFile("fixtures/grpc/config.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -331,27 +308,21 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) {
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var client helloworld.Greeter_StreamExampleClient
client, closer, err := callStreamExampleClientGRPC()
defer func() { _ = closer() }()
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
received := make(chan bool)
go func() {
tr, err := client.Recv()
c.Assert(err, check.IsNil)
c.Assert(len(tr.GetData()), check.Equals, 512)
assert.NoError(s.T(), err)
assert.Len(s.T(), tr.GetData(), 512)
received <- true
}()
@ -363,25 +334,24 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) {
return errors.New("failed to receive stream data")
}
})
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}
func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
func (s *GRPCSuite) TestGRPCBufferWithFlushInterval() {
stopStreamExample := make(chan bool)
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := startGRPCServer(lis, &myserver{
stopStreamExample: stopStreamExample,
})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config.toml", struct {
file := s.adaptFile("fixtures/grpc/config.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -390,17 +360,11 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
KeyContent: string(LocalhostKey),
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var client helloworld.Greeter_StreamExampleClient
client, closer, err := callStreamExampleClientGRPC()
@ -408,13 +372,13 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
_ = closer()
stopStreamExample <- true
}()
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
received := make(chan bool)
go func() {
tr, err := client.Recv()
c.Assert(err, check.IsNil)
c.Assert(len(tr.GetData()), check.Equals, 512)
assert.NoError(s.T(), err)
assert.Len(s.T(), tr.GetData(), 512)
received <- true
}()
@ -426,22 +390,21 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
return errors.New("failed to receive stream data")
}
})
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}
func (s *GRPCSuite) TestGRPCWithRetry(c *check.C) {
func (s *GRPCSuite) TestGRPCWithRetry() {
lis, err := net.Listen("tcp", ":0")
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
_, port, err := net.SplitHostPort(lis.Addr().String())
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
go func() {
err := startGRPCServer(lis, &myserver{})
c.Log(err)
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
}()
file := s.adaptFile(c, "fixtures/grpc/config_retry.toml", struct {
file := s.adaptFile("fixtures/grpc/config_retry.toml", struct {
CertContent string
KeyContent string
GRPCServerPort string
@ -451,23 +414,17 @@ func (s *GRPCSuite) TestGRPCWithRetry(c *check.C) {
GRPCServerPort: port,
})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, check.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("Host(`127.0.0.1`)"))
c.Assert(err, check.IsNil)
assert.NoError(s.T(), err)
var response string
err = try.Do(1*time.Second, func() error {
response, err = callHelloClientGRPC("World", true)
return err
})
c.Assert(err, check.IsNil)
c.Assert(response, check.Equals, "Hello World")
assert.NoError(s.T(), err)
assert.Equal(s.T(), "Hello World", response)
}
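
The callHelloClientGRPC helper used throughout these tests is not part of this hunk. For context, a hedged sketch of what such a call looks like with grpc-go against the generated helloworld Greeter service; the listen address and the self-signed-certificate handling are assumptions taken from the fixtures rather than from this diff:

    package main

    import (
        "context"
        "crypto/tls"
        "log"
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"

        "github.com/traefik/traefik/v3/integration/helloworld"
    )

    func main() {
        // TLS variant: skip verification because the tests use self-signed certificates.
        // The h2c variants would dial with insecure credentials instead
        // (google.golang.org/grpc/credentials/insecure).
        creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
        conn, err := grpc.Dial("127.0.0.1:4443", grpc.WithTransportCredentials(creds))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := helloworld.NewGreeterClient(conn)

        ctx, cancel := context.WithTimeout(context.Background(), time.Second)
        defer cancel()

        reply, err := client.SayHello(ctx, &helloworld.HelloRequest{Name: "World"})
        if err != nil {
            log.Fatal(err)
        }
        log.Println(reply.GetMessage()) // the test server answers "Hello World"
    }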

View file

@ -4,50 +4,45 @@ import (
"net"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
// Headers tests suite.
type HeadersSuite struct{ BaseSuite }
func (s *HeadersSuite) TestSimpleConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/headers/basic.toml"))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
func TestHeadersSuite(t *testing.T) {
suite.Run(t, new(HeadersSuite))
}
func (s *HeadersSuite) TestReverseProxyHeaderRemoved(c *check.C) {
file := s.adaptFile(c, "fixtures/headers/remove_reverseproxy_headers.toml", struct{}{})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
func (s *HeadersSuite) TestSimpleConfiguration() {
s.traefikCmd(withConfigFile("fixtures/headers/basic.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
// Expected a 404 as we did not configure anything
err := try.GetRequest("http://127.0.0.1:8000/", 1000*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
}
func (s *HeadersSuite) TestReverseProxyHeaderRemoved() {
file := s.adaptFile("fixtures/headers/remove_reverseproxy_headers.toml", struct{}{})
s.traefikCmd(withConfigFile(file))
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, found := r.Header["X-Forwarded-Host"]
c.Assert(found, checker.True)
assert.True(s.T(), found)
_, found = r.Header["Foo"]
c.Assert(found, checker.False)
assert.False(s.T(), found)
_, found = r.Header["X-Forwarded-For"]
c.Assert(found, checker.False)
assert.False(s.T(), found)
})
listener, err := net.Listen("tcp", "127.0.0.1:9000")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
ts := &httptest.Server{
Listener: listener,
@ -57,31 +52,25 @@ func (s *HeadersSuite) TestReverseProxyHeaderRemoved(c *check.C) {
defer ts.Close()
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "test.localhost"
req.Header = http.Header{
"Foo": {"bar"},
}
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *HeadersSuite) TestCorsResponses(c *check.C) {
file := s.adaptFile(c, "fixtures/headers/cors.toml", struct{}{})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *HeadersSuite) TestCorsResponses() {
file := s.adaptFile("fixtures/headers/cors.toml", struct{}{})
s.traefikCmd(withConfigFile(file))
backend := startTestServer("9000", http.StatusOK, "")
defer backend.Close()
err = try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err := try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
testCase := []struct {
desc string
@ -147,30 +136,24 @@ func (s *HeadersSuite) TestCorsResponses(c *check.C) {
for _, test := range testCase {
req, err := http.NewRequest(test.method, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = test.reqHost
req.Header = test.requestHeaders
err = try.Request(req, 500*time.Millisecond, try.HasHeaderStruct(test.expected))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
}
func (s *HeadersSuite) TestSecureHeadersResponses(c *check.C) {
file := s.adaptFile(c, "fixtures/headers/secure.toml", struct{}{})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *HeadersSuite) TestSecureHeadersResponses() {
file := s.adaptFile("fixtures/headers/secure.toml", struct{}{})
s.traefikCmd(withConfigFile(file))
backend := startTestServer("9000", http.StatusOK, "")
defer backend.Close()
err = try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err := try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
testCase := []struct {
desc string
@ -190,36 +173,30 @@ func (s *HeadersSuite) TestSecureHeadersResponses(c *check.C) {
for _, test := range testCase {
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = test.reqHost
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasHeaderStruct(test.expected))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/api/rawdata", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = test.internalReqHost
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasHeaderStruct(test.expected))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
}
func (s *HeadersSuite) TestMultipleSecureHeadersResponses(c *check.C) {
file := s.adaptFile(c, "fixtures/headers/secure_multiple.toml", struct{}{})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *HeadersSuite) TestMultipleSecureHeadersResponses() {
file := s.adaptFile("fixtures/headers/secure_multiple.toml", struct{}{})
s.traefikCmd(withConfigFile(file))
backend := startTestServer("9000", http.StatusOK, "")
defer backend.Close()
err = try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err := try.GetRequest(backend.URL, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
testCase := []struct {
desc string
@ -238,10 +215,10 @@ func (s *HeadersSuite) TestMultipleSecureHeadersResponses(c *check.C) {
for _, test := range testCase {
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = test.reqHost
err = try.Request(req, 500*time.Millisecond, try.HasHeaderStruct(test.expected))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
}
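
The table of CORS test cases is elided in the hunk above. The requests it builds are ordinary preflights; a standalone version against the entrypoint looks roughly like this, with the origin, host rule, and inspected response headers being illustrative rather than copied from fixtures/headers/cors.toml:

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        // A CORS preflight: OPTIONS plus Origin and Access-Control-Request-* headers.
        req, err := http.NewRequest(http.MethodOptions, "http://127.0.0.1:8000/", nil)
        if err != nil {
            log.Fatal(err)
        }
        req.Host = "test.localhost" // matched by a Host(...) router rule
        req.Header.Set("Origin", "https://foo.bar.org")
        req.Header.Set("Access-Control-Request-Method", "GET")
        req.Header.Set("Access-Control-Request-Headers", "origin")

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        // The headers middleware is expected to answer the preflight itself.
        for _, h := range []string{
            "Access-Control-Allow-Origin",
            "Access-Control-Allow-Methods",
            "Access-Control-Max-Age",
        } {
            log.Printf("%s: %s", h, resp.Header.Get(h))
        }
    }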

View file

@ -7,11 +7,13 @@ import (
"net/http"
"os"
"strings"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
// HealthCheck test suites.
@ -23,287 +25,272 @@ type HealthCheckSuite struct {
whoami4IP string
}
func (s *HealthCheckSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "healthcheck")
s.composeUp(c)
s.whoami1IP = s.getComposeServiceIP(c, "whoami1")
s.whoami2IP = s.getComposeServiceIP(c, "whoami2")
s.whoami3IP = s.getComposeServiceIP(c, "whoami3")
s.whoami4IP = s.getComposeServiceIP(c, "whoami4")
func TestHealthCheckSuite(t *testing.T) {
suite.Run(t, new(HealthCheckSuite))
}
func (s *HealthCheckSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/simple.toml", struct {
func (s *HealthCheckSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("healthcheck")
s.composeUp()
s.whoami1IP = s.getComposeServiceIP("whoami1")
s.whoami2IP = s.getComposeServiceIP("whoami2")
s.whoami3IP = s.getComposeServiceIP("whoami3")
s.whoami4IP = s.getComposeServiceIP("whoami4")
}
func (s *HealthCheckSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *HealthCheckSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/healthcheck/simple.toml", struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
require.NoError(s.T(), err)
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Fix all whoami health to 500
client := &http.Client{}
whoamiHosts := []string{s.whoami1IP, s.whoami2IP}
for _, whoami := range whoamiHosts {
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
// Verify no backend service is available due to failing health checks
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Change one whoami health to 200
statusOKReq1, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusOKReq1)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify frontend health : after
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendReq.Host = "test.localhost"
// Check if whoami1 responds
err = try.Request(frontendReq, 500*time.Millisecond, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Check if the service with bad health check (whoami2) never respond.
err = try.Request(frontendReq, 2*time.Second, try.BodyContains(s.whoami2IP))
c.Assert(err, checker.NotNil)
assert.Error(s.T(), err)
// TODO validate : run on 80
resp, err := http.Get("http://127.0.0.1:8000/")
// Expected a 404 as we did not configure anything
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusNotFound, resp.StatusCode)
}
func (s *HealthCheckSuite) TestMultipleEntrypoints(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/multiple-entrypoints.toml", struct {
func (s *HealthCheckSuite) TestMultipleEntrypoints() {
file := s.adaptFile("fixtures/healthcheck/multiple-entrypoints.toml", struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// Wait for traefik
err = try.GetRequest("http://localhost:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
require.NoError(s.T(), err)
// Check entrypoint http1
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err = try.Request(frontendHealthReq, 5*time.Second, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
// Check entrypoint http2
frontendHealthReq, err = http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendHealthReq.Host = "test.localhost"
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
err = try.Request(frontendHealthReq, 5*time.Second, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
// Set the both whoami health to 500
client := &http.Client{}
whoamiHosts := []string{s.whoami1IP, s.whoami2IP}
for _, whoami := range whoamiHosts {
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
// Verify no backend service is available due to failing health checks
err = try.Request(frontendHealthReq, 5*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// reactivate the whoami2
statusInternalServerOkReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami2IP+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerOkReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontend1Req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontend1Req.Host = "test.localhost"
frontend2Req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontend2Req.Host = "test.localhost"
// Check if whoami1 never responds
err = try.Request(frontend2Req, 2*time.Second, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.NotNil)
assert.Error(s.T(), err)
// Check if whoami1 never responds
err = try.Request(frontend1Req, 2*time.Second, try.BodyContains(s.whoami1IP))
c.Assert(err, checker.NotNil)
assert.Error(s.T(), err)
}
func (s *HealthCheckSuite) TestPortOverload(c *check.C) {
func (s *HealthCheckSuite) TestPortOverload() {
// Set one whoami health to 200
client := &http.Client{}
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
file := s.adaptFile(c, "fixtures/healthcheck/port_overload.toml", struct {
file := s.adaptFile("fixtures/healthcheck/port_overload.toml", struct {
Server1 string
}{s.whoami1IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err = cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 10*time.Second, try.BodyContains("Host(`test.localhost`)"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendHealthReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
frontendHealthReq.Host = "test.localhost"
// We test bad gateway because we use an invalid port for the backend
err = try.Request(frontendHealthReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusBadGateway))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Set one whoami health to 500
statusInternalServerErrorReq, err = http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify no backend service is available due to failing health checks
err = try.Request(frontendHealthReq, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
// Checks if all the loadbalancers created will correctly update the server status.
func (s *HealthCheckSuite) TestMultipleRoutersOnSameService(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/multiple-routers-one-same-service.toml", struct {
func (s *HealthCheckSuite) TestMultipleRoutersOnSameService() {
file := s.adaptFile("fixtures/healthcheck/multiple-routers-one-same-service.toml", struct {
Server1 string
}{s.whoami1IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`test.localhost`)"))
require.NoError(s.T(), err)
// Set whoami health to 200 to be sure to start with the wanted status
client := &http.Client{}
statusOkReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusOkReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// check healthcheck on web1 entrypoint
healthReqWeb1, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
healthReqWeb1.Host = "test.localhost"
err = try.Request(healthReqWeb1, 1*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// check healthcheck on web2 entrypoint
healthReqWeb2, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:9000/health", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
healthReqWeb2.Host = "test.localhost"
err = try.Request(healthReqWeb2, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Set whoami health to 500
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify no backend service is available due to failing health checks
err = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Change one whoami health to 200
statusOKReq1, err := http.NewRequest(http.MethodPost, "http://"+s.whoami1IP+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusOKReq1)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify health check
err = try.Request(healthReqWeb1, 3*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(healthReqWeb2, 3*time.Second, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *HealthCheckSuite) TestPropagate(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/propagate.toml", struct {
func (s *HealthCheckSuite) TestPropagate() {
file := s.adaptFile("fixtures/healthcheck/propagate.toml", struct {
Server1 string
Server2 string
Server3 string
Server4 string
}{s.whoami1IP, s.whoami2IP, s.whoami3IP, s.whoami4IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`root.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`root.localhost`)"))
require.NoError(s.T(), err)
rootReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
rootReq.Host = "root.localhost"
err = try.Request(rootReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Bring whoami1 and whoami3 down
client := http.Client{
@ -313,9 +300,9 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
whoamiHosts := []string{s.whoami1IP, s.whoami3IP}
for _, whoami := range whoamiHosts {
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
try.Sleep(time.Second)
@ -327,19 +314,19 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
reachedServers := make(map[string]int)
for i := 0; i < 4; i++ {
resp, err := client.Do(rootReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if reachedServers[s.whoami4IP] > reachedServers[s.whoami2IP] {
c.Assert(string(body), checker.Contains, want2)
assert.Contains(s.T(), string(body), want2)
reachedServers[s.whoami2IP]++
continue
}
if reachedServers[s.whoami2IP] > reachedServers[s.whoami4IP] {
c.Assert(string(body), checker.Contains, want4)
assert.Contains(s.T(), string(body), want4)
reachedServers[s.whoami4IP]++
continue
}
@ -356,48 +343,48 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
}
}
c.Assert(reachedServers[s.whoami2IP], checker.Equals, 2)
c.Assert(reachedServers[s.whoami4IP], checker.Equals, 2)
assert.Equal(s.T(), 2, reachedServers[s.whoami2IP])
assert.Equal(s.T(), 2, reachedServers[s.whoami4IP])
fooReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
fooReq.Host = "foo.localhost"
// Verify load-balancing on foo still works, and that we're getting wsp2, wsp2, wsp2, wsp2, etc.
want := `IP: ` + s.whoami2IP
for i := 0; i < 4; i++ {
resp, err := client.Do(fooReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
c.Assert(string(body), checker.Contains, want)
assert.Contains(s.T(), string(body), want)
}
barReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
barReq.Host = "bar.localhost"
// Verify load-balancing on bar still works, and that we're getting wsp2, wsp2, wsp2, wsp2, etc.
want = `IP: ` + s.whoami2IP
for i := 0; i < 4; i++ {
resp, err := client.Do(barReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
c.Assert(string(body), checker.Contains, want)
assert.Contains(s.T(), string(body), want)
}
// Bring whoami2 and whoami4 down
whoamiHosts = []string{s.whoami2IP, s.whoami4IP}
for _, whoami := range whoamiHosts {
statusInternalServerErrorReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusInternalServerErrorReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
try.Sleep(time.Second)
@ -405,25 +392,25 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
// Verify that everything is down, and that we get 503s everywhere.
for i := 0; i < 2; i++ {
resp, err := client.Do(rootReq)
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusServiceUnavailable)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusServiceUnavailable, resp.StatusCode)
resp, err = client.Do(fooReq)
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusServiceUnavailable)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusServiceUnavailable, resp.StatusCode)
resp, err = client.Do(barReq)
c.Assert(err, checker.IsNil)
c.Assert(resp.StatusCode, checker.Equals, http.StatusServiceUnavailable)
require.NoError(s.T(), err)
assert.Equal(s.T(), http.StatusServiceUnavailable, resp.StatusCode)
}
// Bring everything back up.
whoamiHosts = []string{s.whoami1IP, s.whoami2IP, s.whoami3IP, s.whoami4IP}
for _, whoami := range whoamiHosts {
statusOKReq, err := http.NewRequest(http.MethodPost, "http://"+whoami+"/health", bytes.NewBufferString("200"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusOKReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
try.Sleep(time.Second)
@ -432,10 +419,10 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
reachedServers = make(map[string]int)
for i := 0; i < 4; i++ {
resp, err := client.Do(rootReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if strings.Contains(string(body), `IP: `+s.whoami1IP) {
reachedServers[s.whoami1IP]++
@ -458,19 +445,19 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
}
}
c.Assert(reachedServers[s.whoami1IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami2IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami3IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami4IP], checker.Equals, 1)
assert.Equal(s.T(), 1, reachedServers[s.whoami1IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami2IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami3IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami4IP])
// Verify everything is up on foo router.
reachedServers = make(map[string]int)
for i := 0; i < 4; i++ {
resp, err := client.Do(fooReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if strings.Contains(string(body), `IP: `+s.whoami1IP) {
reachedServers[s.whoami1IP]++
@ -493,19 +480,19 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
}
}
c.Assert(reachedServers[s.whoami1IP], checker.Equals, 2)
c.Assert(reachedServers[s.whoami2IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami3IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami4IP], checker.Equals, 0)
assert.Equal(s.T(), 2, reachedServers[s.whoami1IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami2IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami3IP])
assert.Equal(s.T(), 0, reachedServers[s.whoami4IP])
// Verify everything is up on bar router.
reachedServers = make(map[string]int)
for i := 0; i < 4; i++ {
resp, err := client.Do(barReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
if strings.Contains(string(body), `IP: `+s.whoami1IP) {
reachedServers[s.whoami1IP]++
@ -528,102 +515,87 @@ func (s *HealthCheckSuite) TestPropagate(c *check.C) {
}
}
c.Assert(reachedServers[s.whoami1IP], checker.Equals, 2)
c.Assert(reachedServers[s.whoami2IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami3IP], checker.Equals, 1)
c.Assert(reachedServers[s.whoami4IP], checker.Equals, 0)
assert.Equal(s.T(), 2, reachedServers[s.whoami1IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami2IP])
assert.Equal(s.T(), 1, reachedServers[s.whoami3IP])
assert.Equal(s.T(), 0, reachedServers[s.whoami4IP])
}
func (s *HealthCheckSuite) TestPropagateNoHealthCheck(c *check.C) {
file := s.adaptFile(c, "fixtures/healthcheck/propagate_no_healthcheck.toml", struct {
func (s *HealthCheckSuite) TestPropagateNoHealthCheck() {
file := s.adaptFile("fixtures/healthcheck/propagate_no_healthcheck.toml", struct {
Server1 string
}{s.whoami1IP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`noop.localhost`)"), try.BodyNotContains("Host(`root.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`noop.localhost`)"), try.BodyNotContains("Host(`root.localhost`)"))
require.NoError(s.T(), err)
rootReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
rootReq.Host = "root.localhost"
err = try.Request(rootReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *HealthCheckSuite) TestPropagateReload(c *check.C) {
func (s *HealthCheckSuite) TestPropagateReload() {
// Setup a WSP service without the healthcheck enabled (wsp-service1)
withoutHealthCheck := s.adaptFile(c, "fixtures/healthcheck/reload_without_healthcheck.toml", struct {
withoutHealthCheck := s.adaptFile("fixtures/healthcheck/reload_without_healthcheck.toml", struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(withoutHealthCheck)
withHealthCheck := s.adaptFile(c, "fixtures/healthcheck/reload_with_healthcheck.toml", struct {
withHealthCheck := s.adaptFile("fixtures/healthcheck/reload_with_healthcheck.toml", struct {
Server1 string
Server2 string
}{s.whoami1IP, s.whoami2IP})
defer os.Remove(withHealthCheck)
cmd, display := s.traefikCmd(withConfigFile(withoutHealthCheck))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(withoutHealthCheck))
// wait for traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`root.localhost`)"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 60*time.Second, try.BodyContains("Host(`root.localhost`)"))
require.NoError(s.T(), err)
// Allow one of the underlying services on it to fail all servers HC (whoami2)
client := http.Client{
Timeout: 10 * time.Second,
}
statusOKReq, err := http.NewRequest(http.MethodPost, "http://"+s.whoami2IP+"/health", bytes.NewBufferString("500"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = client.Do(statusOKReq)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
rootReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
rootReq.Host = "root.localhost"
// Check the failed service (whoami2) is getting requests, but answer 500
err = try.Request(rootReq, 500*time.Millisecond, try.StatusCodeIs(http.StatusServiceUnavailable))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Enable the healthcheck on the root WSP (wsp-service1) and let Traefik reload the config
fr1, err := os.OpenFile(withoutHealthCheck, os.O_APPEND|os.O_WRONLY, 0o644)
c.Assert(fr1, checker.NotNil)
c.Assert(err, checker.IsNil)
assert.NotNil(s.T(), fr1)
require.NoError(s.T(), err)
err = fr1.Truncate(0)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
fr2, err := os.ReadFile(withHealthCheck)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
_, err = fmt.Fprint(fr1, string(fr2))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = fr1.Close()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
try.Sleep(1 * time.Second)
// Waiting for the reflected change.
err = try.Request(rootReq, 5*time.Second, try.StatusCodeIs(http.StatusOK))
require.NoError(s.T(), err)
// Check the failed service (whoami2) is not getting requests
wantIPs := []string{s.whoami1IP, s.whoami1IP, s.whoami1IP, s.whoami1IP}
for _, ip := range wantIPs {
want := "IP: " + ip
resp, err := client.Do(rootReq)
c.Assert(err, checker.IsNil)
body, err := io.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
c.Assert(string(body), checker.Contains, want)
err = try.Request(rootReq, 5*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContains("IP: "+ip))
require.NoError(s.T(), err)
}
}

View file

@ -2,27 +2,33 @@ package integration
import (
"net/http"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
type HostResolverSuite struct{ BaseSuite }
func (s *HostResolverSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "hostresolver")
s.composeUp(c)
func TestHostResolverSuite(t *testing.T) {
suite.Run(t, new(HostResolverSuite))
}
func (s *HostResolverSuite) TestSimpleConfig(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/simple_hostresolver.toml"))
defer display(c)
func (s *HostResolverSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.createComposeProject("hostresolver")
s.composeUp()
}
func (s *HostResolverSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *HostResolverSuite) TestSimpleConfig() {
s.traefikCmd(withConfigFile("fixtures/simple_hostresolver.toml"))
testCase := []struct {
desc string
@ -43,10 +49,10 @@ func (s *HostResolverSuite) TestSimpleConfig(c *check.C) {
for _, test := range testCase {
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = test.host
err = try.Request(req, 1*time.Second, try.StatusCodeIs(test.status), try.HasBody())
c.Assert(err, checker.IsNil)
err = try.Request(req, 5*time.Second, try.StatusCodeIs(test.status), try.HasBody())
require.NoError(s.T(), err)
}
}

View file

@ -5,28 +5,27 @@ import (
"net"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/config/dynamic"
checker "github.com/vdemeester/shakers"
)
type HTTPSuite struct{ BaseSuite }
func (s *HTTPSuite) TestSimpleConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/http/simple.toml"))
defer display(c)
func TestHTTPSuite(t *testing.T) {
suite.Run(t, new(HTTPSuite))
}
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
func (s *HTTPSuite) TestSimpleConfiguration() {
s.traefikCmd(withConfigFile("fixtures/http/simple.toml"))
// Expect a 404 as we configured nothing.
err = try.GetRequest("http://127.0.0.1:8000/", time.Second, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/", time.Second, try.StatusCodeIs(http.StatusNotFound))
require.NoError(s.T(), err)
// Provide a configuration, fetched by Traefik provider.
configuration := &dynamic.Configuration{
@ -55,14 +54,14 @@ func (s *HTTPSuite) TestSimpleConfiguration(c *check.C) {
}
configData, err := json.Marshal(configuration)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
server := startTestServerWithResponse(configData)
defer server.Close()
// Expect configuration to be applied.
err = try.GetRequest("http://127.0.0.1:9090/api/rawdata", 3*time.Second, try.BodyContains("routerHTTP@http", "serviceHTTP@http", "http://bacon:80"))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func startTestServerWithResponse(response []byte) (ts *httptest.Server) {

File diff suppressed because it is too large.

View file

@ -7,208 +7,327 @@ import (
"errors"
"flag"
"fmt"
"io"
"io/fs"
stdlog "log"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"slices"
"strings"
"testing"
"text/template"
"time"
"github.com/compose-spec/compose-go/cli"
"github.com/compose-spec/compose-go/types"
"github.com/docker/cli/cli/config/configfile"
"github.com/docker/compose/v2/cmd/formatter"
composeapi "github.com/docker/compose/v2/pkg/api"
"github.com/docker/compose/v2/pkg/compose"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/client"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
dockernetwork "github.com/docker/docker/api/types/network"
"github.com/fatih/structs"
"github.com/go-check/check"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/sirupsen/logrus"
"github.com/traefik/traefik/v3/pkg/logs"
checker "github.com/vdemeester/shakers"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/network"
"github.com/traefik/traefik/v3/integration/try"
"gopkg.in/yaml.v3"
)
var (
integration = flag.Bool("integration", false, "run integration tests")
showLog = flag.Bool("tlog", false, "always show Traefik logs")
)
var showLog = flag.Bool("tlog", false, "always show Traefik logs")
func Test(t *testing.T) {
if !*integration {
log.Info().Msg("Integration tests disabled.")
return
type composeConfig struct {
Services map[string]composeService `yaml:"services"`
}
log.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}).
With().Timestamp().Caller().Logger()
zerolog.SetGlobalLevel(zerolog.InfoLevel)
// Global logrus replacement
logrus.StandardLogger().Out = logs.NoLevel(log.Logger, zerolog.DebugLevel)
// configure default standard log.
stdlog.SetFlags(stdlog.Lshortfile | stdlog.LstdFlags)
stdlog.SetOutput(logs.NoLevel(log.Logger, zerolog.DebugLevel))
// TODO(mpl): very niche optimization: do not start tailscale if none of the wanted tests actually need it (e.g. KeepAliveSuite does not).
var (
vpn *tailscaleNotSuite
useVPN bool
)
if os.Getenv("IN_DOCKER") != "true" {
if vpn = setupVPN(nil, "tailscale.secret"); vpn != nil {
defer vpn.TearDownSuite(nil)
useVPN = true
}
type composeService struct {
Image string `yaml:"image"`
Labels map[string]string `yaml:"labels"`
Hostname string `yaml:"hostname"`
Volumes []string `yaml:"volumes"`
CapAdd []string `yaml:"cap_add"`
Command []string `yaml:"command"`
Environment map[string]string `yaml:"environment"`
Privileged bool `yaml:"privileged"`
Deploy composeDeploy `yaml:"deploy"`
}
check.Suite(&AccessLogSuite{})
if !useVPN {
check.Suite(&AcmeSuite{})
}
check.Suite(&ConsulCatalogSuite{})
check.Suite(&ConsulSuite{})
check.Suite(&DockerComposeSuite{})
check.Suite(&DockerSuite{})
check.Suite(&ErrorPagesSuite{})
check.Suite(&EtcdSuite{})
check.Suite(&FileSuite{})
check.Suite(&GRPCSuite{})
check.Suite(&HeadersSuite{})
check.Suite(&HealthCheckSuite{})
check.Suite(&HostResolverSuite{})
check.Suite(&HTTPSSuite{})
check.Suite(&HTTPSuite{})
if !useVPN {
check.Suite(&K8sSuite{})
}
check.Suite(&KeepAliveSuite{})
check.Suite(&LogRotationSuite{})
if !useVPN {
check.Suite(&ProxyProtocolSuite{})
}
check.Suite(&RateLimitSuite{})
check.Suite(&RedisSuite{})
check.Suite(&RestSuite{})
check.Suite(&RetrySuite{})
check.Suite(&SimpleSuite{})
check.Suite(&TCPSuite{})
check.Suite(&TimeoutSuite{})
check.Suite(&ThrottlingSuite{})
check.Suite(&TLSClientHeadersSuite{})
check.Suite(&TracingSuite{})
check.Suite(&UDPSuite{})
check.Suite(&WebsocketSuite{})
check.Suite(&ZookeeperSuite{})
check.TestingT(t)
type composeDeploy struct {
Replicas int `yaml:"replicas"`
}
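These structs capture only the subset of the compose file format that the harness actually reads, and the file is parsed with the same gopkg.in/yaml.v3 package imported above. As a rough illustration of the mapping, the sketch below uses trimmed re-declarations and an invented YAML literal (neither is taken from the repository fixtures):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Trimmed re-declarations, just enough for a self-contained sketch.
type sketchDeploy struct {
	Replicas int `yaml:"replicas"`
}

type sketchService struct {
	Image  string            `yaml:"image"`
	Labels map[string]string `yaml:"labels"`
	Deploy sketchDeploy      `yaml:"deploy"`
}

type sketchConfig struct {
	Services map[string]sketchService `yaml:"services"`
}

func main() {
	data := []byte(`
services:
  whoami:
    image: traefik/whoami
    labels:
      traefik.enable: "true"
    deploy:
      replicas: 2
`)

	var cfg sketchConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}

	svc := cfg.Services["whoami"]
	fmt.Println(svc.Image, svc.Deploy.Replicas) // traefik/whoami 2
}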
var traefikBinary = "../dist/traefik"
type BaseSuite struct {
composeProject *types.Project
dockerComposeService composeapi.Service
dockerClient *client.Client
suite.Suite
containers map[string]testcontainers.Container
network *testcontainers.DockerNetwork
hostIP string
}
func (s *BaseSuite) TearDownSuite(c *check.C) {
if s.composeProject != nil && s.dockerComposeService != nil {
s.composeDown(c)
func (s *BaseSuite) waitForTraefik(containerName string) {
time.Sleep(1 * time.Second)
// Wait for Traefik to turn ready.
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8080/api/rawdata", nil)
require.NoError(s.T(), err)
err = try.Request(req, 2*time.Second, try.StatusCodeIs(http.StatusOK), try.BodyContains(containerName))
require.NoError(s.T(), err)
}
func (s *BaseSuite) displayTraefikLogFile(path string) {
if s.T().Failed() {
if _, err := os.Stat(path); !os.IsNotExist(err) {
content, errRead := os.ReadFile(path)
// TODO TestName
// fmt.Printf("%s: Traefik logs: \n", c.TestName())
fmt.Print("Traefik logs: \n")
if errRead == nil {
fmt.Println(string(content))
} else {
fmt.Println(errRead)
}
} else {
// fmt.Printf("%s: No Traefik logs.\n", c.TestName())
fmt.Print("No Traefik logs.\n")
}
errRemove := os.Remove(path)
if errRemove != nil {
fmt.Println(errRemove)
}
}
}
func (s *BaseSuite) SetupSuite() {
// configure default standard log.
stdlog.SetFlags(stdlog.Lshortfile | stdlog.LstdFlags)
// TODO
// stdlog.SetOutput(log.Logger)
ctx := context.Background()
// Create docker network
// docker network create traefik-test-network --driver bridge --subnet 172.31.42.0/24
var opts []network.NetworkCustomizer
opts = append(opts, network.WithDriver("bridge"))
opts = append(opts, network.WithIPAM(&dockernetwork.IPAM{
Driver: "default",
Config: []dockernetwork.IPAMConfig{
{
Subnet: "172.31.42.0/24",
},
},
}))
dockerNetwork, err := network.New(ctx, opts...)
require.NoError(s.T(), err)
s.network = dockerNetwork
s.hostIP = "172.31.42.1"
if isDockerDesktop(ctx, s.T()) {
s.hostIP = getDockerDesktopHostIP(ctx, s.T())
s.setupVPN("tailscale.secret")
}
}
func getDockerDesktopHostIP(ctx context.Context, t *testing.T) string {
t.Helper()
req := testcontainers.ContainerRequest{
Image: "alpine",
HostConfigModifier: func(config *container.HostConfig) {
config.AutoRemove = true
},
Cmd: []string{"getent", "hosts", "host.docker.internal"},
}
con, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
require.NoError(t, err)
closer, err := con.Logs(ctx)
require.NoError(t, err)
all, err := io.ReadAll(closer)
require.NoError(t, err)
ipRegex := regexp.MustCompile(`\b(?:\d{1,3}\.){3}\d{1,3}\b`)
matches := ipRegex.FindAllString(string(all), -1)
require.Len(t, matches, 1)
return matches[0]
}
func isDockerDesktop(ctx context.Context, t *testing.T) bool {
t.Helper()
cli, err := testcontainers.NewDockerClientWithOpts(ctx)
if err != nil {
t.Fatalf("failed to create docker client: %s", err)
}
info, err := cli.Info(ctx)
if err != nil {
t.Fatalf("failed to get docker info: %s", err)
}
return info.OperatingSystem == "Docker Desktop"
}
func (s *BaseSuite) TearDownSuite() {
s.composeDown()
err := try.Do(5*time.Second, func() error {
if s.network != nil {
err := s.network.Remove(context.Background())
if err != nil {
return err
}
}
return nil
})
require.NoError(s.T(), err)
}
// createComposeProject creates the docker compose project stored as a field in the BaseSuite.
// This method should be called before starting and/or stopping compose services.
func (s *BaseSuite) createComposeProject(c *check.C, name string) {
projectName := fmt.Sprintf("traefik-integration-test-%s", name)
func (s *BaseSuite) createComposeProject(name string) {
composeFile := fmt.Sprintf("resources/compose/%s.yml", name)
var err error
s.dockerClient, err = client.NewClientWithOpts()
c.Assert(err, checker.IsNil)
file, err := os.ReadFile(composeFile)
require.NoError(s.T(), err)
s.dockerComposeService = compose.NewComposeService(s.dockerClient, &configfile.ConfigFile{})
ops, err := cli.NewProjectOptions([]string{composeFile}, cli.WithName(projectName))
c.Assert(err, checker.IsNil)
var composeConfigData composeConfig
err = yaml.Unmarshal(file, &composeConfigData)
require.NoError(s.T(), err)
s.composeProject, err = cli.ProjectFromOptions(ops)
c.Assert(err, checker.IsNil)
if s.containers == nil {
s.containers = map[string]testcontainers.Container{}
}
ctx := context.Background()
for id, containerConfig := range composeConfigData.Services {
var mounts []mount.Mount
for _, volume := range containerConfig.Volumes {
split := strings.Split(volume, ":")
if len(split) != 2 {
continue
}
if strings.HasPrefix(split[0], "./") {
path, err := os.Getwd()
if err != nil {
log.Err(err).Msg("can't determine current directory")
continue
}
split[0] = strings.Replace(split[0], "./", path+"/", 1)
}
abs, err := filepath.Abs(split[0])
require.NoError(s.T(), err)
mounts = append(mounts, mount.Mount{Source: abs, Target: split[1], Type: mount.TypeBind})
}
if containerConfig.Deploy.Replicas > 0 {
for i := 0; i < containerConfig.Deploy.Replicas; i++ {
replicaID := fmt.Sprintf("%s-%d", id, i+1)
con, err := s.createContainer(ctx, containerConfig, replicaID, mounts)
require.NoError(s.T(), err)
s.containers[replicaID] = con
}
continue
}
con, err := s.createContainer(ctx, containerConfig, id, mounts)
require.NoError(s.T(), err)
s.containers[id] = con
}
}
func (s *BaseSuite) createContainer(ctx context.Context, containerConfig composeService, id string, mounts []mount.Mount) (testcontainers.Container, error) {
req := testcontainers.ContainerRequest{
Image: containerConfig.Image,
Env: containerConfig.Environment,
Cmd: containerConfig.Command,
Labels: containerConfig.Labels,
Name: id,
Hostname: containerConfig.Hostname,
Privileged: containerConfig.Privileged,
Networks: []string{s.network.Name},
HostConfigModifier: func(config *container.HostConfig) {
if containerConfig.CapAdd != nil {
config.CapAdd = containerConfig.CapAdd
}
if !isDockerDesktop(ctx, s.T()) {
config.ExtraHosts = append(config.ExtraHosts, "host.docker.internal:"+s.hostIP)
}
config.Mounts = mounts
},
}
con, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: false,
})
return con, err
}
// composeUp starts the given services of the current docker compose project, if they are not already started.
// Already running services are not affected (i.e. not stopped).
func (s *BaseSuite) composeUp(c *check.C, services ...string) {
c.Assert(s.composeProject, check.NotNil)
c.Assert(s.dockerComposeService, check.NotNil)
// We use Create and Restart instead of Up, because the only option that actually works to control which containers
// are started is within the RestartOptions.
err := s.dockerComposeService.Create(context.Background(), s.composeProject, composeapi.CreateOptions{})
c.Assert(err, checker.IsNil)
err = s.dockerComposeService.Restart(context.Background(), s.composeProject, composeapi.RestartOptions{Services: services})
c.Assert(err, checker.IsNil)
func (s *BaseSuite) composeUp(services ...string) {
for name, con := range s.containers {
if len(services) == 0 || slices.Contains(services, name) {
err := con.Start(context.Background())
require.NoError(s.T(), err)
}
}
// composeExec runs the command in the given args in the given compose service container.
// Already running services are not affected (i.e. not stopped).
func (s *BaseSuite) composeExec(c *check.C, service string, args ...string) {
c.Assert(s.composeProject, check.NotNil)
c.Assert(s.dockerComposeService, check.NotNil)
_, err := s.dockerComposeService.Exec(context.Background(), s.composeProject.Name, composeapi.RunOptions{
Service: service,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
Command: args,
Tty: false,
Index: 1,
})
c.Assert(err, checker.IsNil)
}
// composeStop stops the given services of the current docker compose project and removes the corresponding containers.
func (s *BaseSuite) composeStop(c *check.C, services ...string) {
c.Assert(s.dockerComposeService, check.NotNil)
c.Assert(s.composeProject, check.NotNil)
err := s.dockerComposeService.Stop(context.Background(), s.composeProject, composeapi.StopOptions{Services: services})
c.Assert(err, checker.IsNil)
err = s.dockerComposeService.Remove(context.Background(), s.composeProject, composeapi.RemoveOptions{
Services: services,
Force: true,
})
c.Assert(err, checker.IsNil)
func (s *BaseSuite) composeStop(services ...string) {
for name, con := range s.containers {
if len(services) == 0 || slices.Contains(services, name) {
timeout := 10 * time.Second
err := con.Stop(context.Background(), &timeout)
require.NoError(s.T(), err)
}
}
}
// composeDown stops all compose project services and removes the corresponding containers.
func (s *BaseSuite) composeDown(c *check.C) {
c.Assert(s.dockerComposeService, check.NotNil)
c.Assert(s.composeProject, check.NotNil)
err := s.dockerComposeService.Down(context.Background(), s.composeProject.Name, composeapi.DownOptions{})
c.Assert(err, checker.IsNil)
func (s *BaseSuite) composeDown() {
for _, c := range s.containers {
err := c.Terminate(context.Background())
require.NoError(s.T(), err)
}
s.containers = map[string]testcontainers.Container{}
}
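Taken together, createComposeProject, composeUp, composeStop, and composeDown give each suite a small compose-like lifecycle on top of testcontainers-go. A purely hypothetical suite wiring them up could look like the following sketch (the suite name, compose project name, and service name are illustrative assumptions):

package integration

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// ExampleLifecycleSuite is hypothetical; it only illustrates the call order of the helpers above.
type ExampleLifecycleSuite struct{ BaseSuite }

func TestExampleLifecycleSuite(t *testing.T) {
	suite.Run(t, new(ExampleLifecycleSuite))
}

func (s *ExampleLifecycleSuite) SetupSuite() {
	s.BaseSuite.SetupSuite()         // creates the docker network and resolves the host IP
	s.createComposeProject("whoami") // assumes a resources/compose/whoami.yml file exists
	s.composeUp()                    // starts every container parsed from that file
}

func (s *ExampleLifecycleSuite) TearDownSuite() {
	s.BaseSuite.TearDownSuite() // terminates the containers and removes the network
}

func (s *ExampleLifecycleSuite) TestOutage() {
	s.composeStop("whoami") // stop a single service to simulate an outage
	// assertions against Traefik would go here
}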
func (s *BaseSuite) cmdTraefik(args ...string) (*exec.Cmd, *bytes.Buffer) {
cmd := exec.Command(traefikBinary, args...)
s.T().Cleanup(func() {
s.killCmd(cmd)
})
var out bytes.Buffer
cmd.Stdout = &out
cmd.Stderr = &out
err := cmd.Start()
require.NoError(s.T(), err)
return cmd, &out
}
func (s *BaseSuite) killCmd(cmd *exec.Cmd) {
if cmd.Process == nil {
log.Error().Msg("No process to kill")
return
}
err := cmd.Process.Kill()
if err != nil {
log.Error().Err(err).Msg("Kill")
@ -217,15 +336,18 @@ func (s *BaseSuite) killCmd(cmd *exec.Cmd) {
time.Sleep(100 * time.Millisecond)
}
func (s *BaseSuite) traefikCmd(args ...string) (*exec.Cmd, func(*check.C)) {
func (s *BaseSuite) traefikCmd(args ...string) *exec.Cmd {
cmd, out := s.cmdTraefik(args...)
return cmd, func(c *check.C) {
if c.Failed() || *showLog {
s.T().Cleanup(func() {
if s.T().Failed() || *showLog {
s.displayLogK3S()
s.displayLogCompose(c)
s.displayTraefikLog(c, out)
}
s.displayLogCompose()
s.displayTraefikLog(out)
}
})
return cmd
}
func (s *BaseSuite) displayLogK3S() {
@ -242,30 +364,33 @@ func (s *BaseSuite) displayLogK3S() {
log.Print()
}
func (s *BaseSuite) displayLogCompose(c *check.C) {
if s.dockerComposeService == nil || s.composeProject == nil {
log.Info().Str("testName", c.TestName()).Msg("No docker compose logs.")
return
func (s *BaseSuite) displayLogCompose() {
for name, ctn := range s.containers {
readCloser, err := ctn.Logs(context.Background())
require.NoError(s.T(), err)
for {
b := make([]byte, 1024)
_, err := readCloser.Read(b)
if errors.Is(err, io.EOF) {
break
}
require.NoError(s.T(), err)
trimLogs := bytes.Trim(bytes.TrimSpace(b), string([]byte{0}))
if len(trimLogs) > 0 {
log.Info().Str("container", name).Msg(string(trimLogs))
}
}
}
}
log.Info().Str("testName", c.TestName()).Msg("docker compose logs")
logConsumer := formatter.NewLogConsumer(context.Background(), logs.NoLevel(log.Logger, zerolog.InfoLevel), false, true)
err := s.dockerComposeService.Logs(context.Background(), s.composeProject.Name, logConsumer, composeapi.LogOptions{})
c.Assert(err, checker.IsNil)
log.Print()
log.Print("################################")
log.Print()
}
func (s *BaseSuite) displayTraefikLog(c *check.C, output *bytes.Buffer) {
func (s *BaseSuite) displayTraefikLog(output *bytes.Buffer) {
if output == nil || output.Len() == 0 {
log.Info().Str("testName", c.TestName()).Msg("No Traefik logs.")
log.Info().Msg("No Traefik logs.")
} else {
log.Info().Str("testName", c.TestName()).
Str("logs", output.String()).Msg("Traefik logs")
for _, line := range strings.Split(output.String(), "\n") {
log.Info().Msg(line)
}
}
}
@ -279,67 +404,47 @@ func (s *BaseSuite) getDockerHost() string {
return dockerHost
}
func (s *BaseSuite) adaptFile(c *check.C, path string, tempObjects interface{}) string {
func (s *BaseSuite) adaptFile(path string, tempObjects interface{}) string {
// Load file
tmpl, err := template.ParseFiles(path)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
folder, prefix := filepath.Split(path)
tmpFile, err := os.CreateTemp(folder, strings.TrimSuffix(prefix, filepath.Ext(prefix))+"_*"+filepath.Ext(prefix))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
defer tmpFile.Close()
model := structs.Map(tempObjects)
model["SelfFilename"] = tmpFile.Name()
err = tmpl.ExecuteTemplate(tmpFile, prefix, model)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = tmpFile.Sync()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
s.T().Cleanup(func() {
os.Remove(tmpFile.Name())
})
return tmpFile.Name()
}
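Since adaptFile now registers the temporary file for removal via t.Cleanup, callers no longer need the defer os.Remove(file) seen in the older tests. A hypothetical call site, reusing the ExampleLifecycleSuite from the earlier sketch (the fixture path and the Server1 template field are assumptions):

package integration

// The fixture is expected to reference {{ .Server1 }}; adaptFile also injects
// {{ .SelfFilename }} with the rendered file's own path.
func (s *ExampleLifecycleSuite) TestTemplatedFixture() {
	file := s.adaptFile("fixtures/example/simple.toml", struct {
		Server1 string
	}{Server1: s.getComposeServiceIP("whoami")})

	s.traefikCmd(withConfigFile(file)) // the rendered file is cleaned up automatically after the test
}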
func (s *BaseSuite) getComposeServiceIP(c *check.C, name string) string {
filter := filters.NewArgs(
filters.Arg("label", fmt.Sprintf("%s=%s", composeapi.ProjectLabel, s.composeProject.Name)),
filters.Arg("label", fmt.Sprintf("%s=%s", composeapi.ServiceLabel, name)),
)
containers, err := s.dockerClient.ContainerList(context.Background(), dockertypes.ContainerListOptions{Filters: filter})
c.Assert(err, checker.IsNil)
c.Assert(containers, checker.HasLen, 1)
networkNames := s.composeProject.NetworkNames()
c.Assert(networkNames, checker.HasLen, 1)
network := s.composeProject.Networks[networkNames[0]]
return containers[0].NetworkSettings.Networks[network.Name].IPAddress
}
func (s *BaseSuite) getContainerIP(c *check.C, name string) string {
container, err := s.dockerClient.ContainerInspect(context.Background(), name)
c.Assert(err, checker.IsNil)
c.Assert(container.NetworkSettings.Networks, check.NotNil)
for _, network := range container.NetworkSettings.Networks {
return network.IPAddress
}
// Should never happen.
c.Error("No network found")
func (s *BaseSuite) getComposeServiceIP(name string) string {
container, ok := s.containers[name]
if !ok {
return ""
}
ip, err := container.ContainerIP(context.Background())
if err != nil {
return ""
}
return ip
}
func withConfigFile(file string) string {
return "--configFile=" + file
}
// tailscaleNotSuite includes a BaseSuite out of convenience, so we can benefit
// from composeUp et co., but it is not meant to function as a TestSuite per se.
type tailscaleNotSuite struct{ BaseSuite }
// setupVPN starts Tailscale on the corresponding container, and makes it a subnet
// router, for all the other containers (whoamis, etc) subsequently started for the
// integration tests.
@ -355,25 +460,35 @@ type tailscaleNotSuite struct{ BaseSuite }
// "172.0.0.0/8": ["your_tailscale_identity"],
// },
// },
//
// TODO(mpl): we could maybe even move this setup to the Makefile, to start it
// and let it run (forever, or until voluntarily stopped).
func setupVPN(c *check.C, keyFile string) *tailscaleNotSuite {
func (s *BaseSuite) setupVPN(keyFile string) {
data, err := os.ReadFile(keyFile)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
log.Fatal().Err(err).Send()
log.Error().Err(err).Send()
}
return nil
return
}
authKey := strings.TrimSpace(string(data))
// TODO: copy and create versions that don't need a check.C?
vpn := &tailscaleNotSuite{}
vpn.createComposeProject(c, "tailscale")
vpn.composeUp(c)
// TODO: copy and create versions that don't need a check.C?
s.createComposeProject("tailscale")
s.composeUp()
time.Sleep(5 * time.Second)
// If we ever change the docker subnet in the Makefile,
// we need to change this one below correspondingly.
vpn.composeExec(c, "tailscaled", "tailscale", "up", "--authkey="+authKey, "--advertise-routes=172.31.42.0/24")
return vpn
s.composeExec("tailscaled", "tailscale", "up", "--authkey="+authKey, "--advertise-routes=172.31.42.0/24")
}
// composeExec runs the command in the given args in the given compose service container.
// Already running services are not affected (i.e. not stopped).
func (s *BaseSuite) composeExec(service string, args ...string) string {
require.Contains(s.T(), s.containers, service)
_, reader, err := s.containers[service].Exec(context.Background(), args)
require.NoError(s.T(), err)
content, err := io.ReadAll(reader)
require.NoError(s.T(), err)
return string(content)
}
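Unlike the old docker-compose based version, this composeExec returns whatever the command wrote, read back through the testcontainers Exec reader; setupVPN above simply ignores it. A hypothetical use that asserts on the output (the service and command are illustrative, and the raw stream may include Docker's multiplexing framing bytes):

package integration

import "github.com/stretchr/testify/assert"

func (s *ExampleLifecycleSuite) TestComposeExecOutput() {
	// Assumes the "tailscale" compose project was created for this suite,
	// as the setupVPN helper above does.
	out := s.composeExec("tailscaled", "tailscale", "status")
	assert.NotEmpty(s.T(), out) // the exact content depends on the container, so only check that something came back
}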

View file

@ -7,18 +7,21 @@ import (
"flag"
"fmt"
"io"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/go-check/check"
"github.com/pmezard/go-difflib/difflib"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
"github.com/traefik/traefik/v3/pkg/api"
checker "github.com/vdemeester/shakers"
)
var updateExpected = flag.Bool("update_expected", false, "Update expected files in testdata")
@ -26,25 +29,39 @@ var updateExpected = flag.Bool("update_expected", false, "Update expected files
// K8sSuite tests suite.
type K8sSuite struct{ BaseSuite }
func (s *K8sSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "k8s")
s.composeUp(c)
func TestK8sSuite(t *testing.T) {
suite.Run(t, new(K8sSuite))
}
func (s *K8sSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("k8s")
s.composeUp()
abs, err := filepath.Abs("./fixtures/k8s/config.skip/kubeconfig.yaml")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Do(60*time.Second, func() error {
_, err := os.Stat(abs)
return err
})
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
data, err := os.ReadFile(abs)
require.NoError(s.T(), err)
content := strings.ReplaceAll(string(data), "https://server:6443", fmt.Sprintf("https://%s", net.JoinHostPort(s.getComposeServiceIP("server"), "6443")))
err = os.WriteFile(abs, []byte(content), 0o644)
require.NoError(s.T(), err)
err = os.Setenv("KUBECONFIG", abs)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func (s *K8sSuite) TearDownSuite(c *check.C) {
s.composeDown(c)
func (s *K8sSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
generatedFiles := []string{
"./fixtures/k8s/config.skip/kubeconfig.yaml",
@ -62,120 +79,84 @@ func (s *K8sSuite) TearDownSuite(c *check.C) {
}
}
func (s *K8sSuite) TestIngressConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_default.toml"))
defer display(c)
func (s *K8sSuite) TestIngressConfiguration() {
s.traefikCmd(withConfigFile("fixtures/k8s_default.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-ingress.json", "8080")
s.testConfiguration("testdata/rawdata-ingress.json", "8080")
}
func (s *K8sSuite) TestIngressLabelSelector(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_ingress_label_selector.toml"))
defer display(c)
func (s *K8sSuite) TestIngressLabelSelector() {
s.traefikCmd(withConfigFile("fixtures/k8s_ingress_label_selector.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-ingress-label-selector.json", "8080")
s.testConfiguration("testdata/rawdata-ingress-label-selector.json", "8080")
}
func (s *K8sSuite) TestCRDConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd.toml"))
defer display(c)
func (s *K8sSuite) TestCRDConfiguration() {
s.traefikCmd(withConfigFile("fixtures/k8s_crd.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-crd.json", "8000")
s.testConfiguration("testdata/rawdata-crd.json", "8000")
}
func (s *K8sSuite) TestCRDLabelSelector(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_crd_label_selector.toml"))
defer display(c)
func (s *K8sSuite) TestCRDLabelSelector() {
s.traefikCmd(withConfigFile("fixtures/k8s_crd_label_selector.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-crd-label-selector.json", "8000")
s.testConfiguration("testdata/rawdata-crd-label-selector.json", "8000")
}
func (s *K8sSuite) TestGatewayConfiguration(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_gateway.toml"))
defer display(c)
func (s *K8sSuite) TestGatewayConfiguration() {
s.traefikCmd(withConfigFile("fixtures/k8s_gateway.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-gateway.json", "8080")
s.testConfiguration("testdata/rawdata-gateway.json", "8080")
}
func (s *K8sSuite) TestIngressclass(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_ingressclass.toml"))
defer display(c)
func (s *K8sSuite) TestIngressclass() {
s.traefikCmd(withConfigFile("fixtures/k8s_ingressclass.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-ingressclass.json", "8080")
s.testConfiguration("testdata/rawdata-ingressclass.json", "8080")
}
func (s *K8sSuite) TestDisableIngressclassLookup(c *check.C) {
cmd, display := s.traefikCmd(withConfigFile("fixtures/k8s_ingressclass_disabled.toml"))
defer display(c)
func (s *K8sSuite) TestDisableIngressclassLookup() {
s.traefikCmd(withConfigFile("fixtures/k8s_ingressclass_disabled.toml"))
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
testConfiguration(c, "testdata/rawdata-ingressclass-disabled.json", "8080")
s.testConfiguration("testdata/rawdata-ingressclass-disabled.json", "8080")
}
func testConfiguration(c *check.C, path, apiPort string) {
func (s *K8sSuite) testConfiguration(path, apiPort string) {
err := try.GetRequest("http://127.0.0.1:"+apiPort+"/api/entrypoints", 20*time.Second, try.BodyContains(`"name":"web"`))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
expectedJSON := filepath.FromSlash(path)
if *updateExpected {
fi, err := os.Create(expectedJSON)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = fi.Close()
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
var buf bytes.Buffer
err = try.GetRequest("http://127.0.0.1:"+apiPort+"/api/rawdata", 1*time.Minute, try.StatusCodeIs(http.StatusOK), matchesConfig(expectedJSON, &buf))
if !*updateExpected {
if err != nil {
c.Error(err)
}
require.NoError(s.T(), err)
return
}
if err != nil {
c.Logf("In file update mode, got expected error: %v", err)
log.Info().Msgf("In file update mode, got expected error: %v", err)
}
var rtRepr api.RunTimeRepresentation
err = json.Unmarshal(buf.Bytes(), &rtRepr)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
newJSON, err := json.MarshalIndent(rtRepr, "", "\t")
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = os.WriteFile(expectedJSON, newJSON, 0o644)
c.Assert(err, checker.IsNil)
c.Errorf("We do not want a passing test in file update mode")
require.NoError(s.T(), err)
s.T().Fatal("We do not want a passing test in file update mode")
}
func matchesConfig(wantConfig string, buf *bytes.Buffer) try.ResponseCondition {

View file

@ -5,18 +5,23 @@ import (
"net"
"net/http"
"net/http/httptest"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
type KeepAliveSuite struct {
BaseSuite
}
func TestKeepAliveSuite(t *testing.T) {
suite.Run(t, new(KeepAliveSuite))
}
type KeepAliveConfig struct {
KeepAliveServer string
IdleConnTimeout string
@ -27,7 +32,7 @@ type connStateChangeEvent struct {
state http.ConnState
}
func (s *KeepAliveSuite) TestShouldRespectConfiguredBackendHttpKeepAliveTime(c *check.C) {
func (s *KeepAliveSuite) TestShouldRespectConfiguredBackendHttpKeepAliveTime() {
idleTimeout := time.Duration(75) * time.Millisecond
connStateChanges := make(chan connStateChangeEvent)
@ -59,18 +64,18 @@ func (s *KeepAliveSuite) TestShouldRespectConfiguredBackendHttpKeepAliveTime(c *
case <-noMoreRequests:
moreRequestsExpected = false
case <-maxWaitTimeExceeded:
c.Logf("timeout waiting for all connections to close, waited for %v, configured idle timeout was %v", maxWaitDuration, idleTimeout)
c.Fail()
log.Info().Msgf("timeout waiting for all connections to close, waited for %v, configured idle timeout was %v", maxWaitDuration, idleTimeout)
s.T().Fail()
close(completed)
return
}
}
c.Check(connCount, checker.Equals, 1)
require.Equal(s.T(), 1, connCount)
for _, idlePeriod := range idlePeriodLengthMap {
// Our method of measuring the actual idle period is not precise, so allow some sub-ms deviation
c.Check(math.Round(idlePeriod.Seconds()), checker.LessOrEqualThan, idleTimeout.Seconds())
require.LessOrEqual(s.T(), math.Round(idlePeriod.Seconds()), idleTimeout.Seconds())
}
close(completed)
@ -87,22 +92,16 @@ func (s *KeepAliveSuite) TestShouldRespectConfiguredBackendHttpKeepAliveTime(c *
defer server.Close()
config := KeepAliveConfig{KeepAliveServer: server.URL, IdleConnTimeout: idleTimeout.String()}
file := s.adaptFile(c, "fixtures/timeout/keepalive.toml", config)
file := s.adaptFile("fixtures/timeout/keepalive.toml", config)
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Check(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
// Wait for Traefik
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", time.Duration(1)*time.Second, try.StatusCodeIs(200), try.BodyContains("PathPrefix(`/keepalive`)"))
c.Check(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", time.Duration(1)*time.Second, try.StatusCodeIs(200), try.BodyContains("PathPrefix(`/keepalive`)"))
require.NoError(s.T(), err)
err = try.GetRequest("http://127.0.0.1:8000/keepalive", time.Duration(1)*time.Second, try.StatusCodeIs(200))
c.Check(err, checker.IsNil)
require.NoError(s.T(), err)
close(noMoreRequests)
<-completed

View file

@ -9,12 +9,14 @@ import (
"os"
"strings"
"syscall"
"testing"
"time"
"github.com/go-check/check"
"github.com/rs/zerolog/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
const traefikTestAccessLogFileRotated = traefikTestAccessLogFile + ".rotated"
@ -22,13 +24,23 @@ const traefikTestAccessLogFileRotated = traefikTestAccessLogFile + ".rotated"
// Log rotation integration test suite.
type LogRotationSuite struct{ BaseSuite }
func (s *LogRotationSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "access_log")
s.composeUp(c)
func TestLogRotationSuite(t *testing.T) {
suite.Run(t, new(LogRotationSuite))
}
func (s *LogRotationSuite) TearDownSuite(c *check.C) {
s.composeDown(c)
func (s *LogRotationSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
os.Remove(traefikTestAccessLogFile)
os.Remove(traefikTestLogFile)
os.Remove(traefikTestAccessLogFileRotated)
s.createComposeProject("access_log")
s.composeUp()
}
func (s *LogRotationSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
generatedFiles := []string{
traefikTestLogFile,
@ -43,84 +55,80 @@ func (s *LogRotationSuite) TearDownSuite(c *check.C) {
}
}
func (s *LogRotationSuite) TestAccessLogRotation(c *check.C) {
func (s *LogRotationSuite) TestAccessLogRotation() {
// Start Traefik
cmd, display := s.traefikCmd(withConfigFile("fixtures/access_log_config.toml"))
defer display(c)
defer displayTraefikLogFile(c, traefikTestLogFile)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
cmd, _ := s.cmdTraefik(withConfigFile("fixtures/access_log_config.toml"))
defer s.displayTraefikLogFile(traefikTestLogFile)
// Verify Traefik started ok
verifyEmptyErrorLog(c, "traefik.log")
s.verifyEmptyErrorLog("traefik.log")
waitForTraefik(c, "server1")
s.waitForTraefik("server1")
// Make some requests
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
req.Host = "frontend1.docker.local"
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Rename access log
err = os.Rename(traefikTestAccessLogFile, traefikTestAccessLogFileRotated)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// in the midst of the requests, issue SIGUSR1 signal to server process
err = cmd.Process.Signal(syscall.SIGUSR1)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// continue issuing requests
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
err = try.Request(req, 500*time.Millisecond, try.StatusCodeIs(http.StatusOK), try.HasBody())
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
// Verify access.log.rotated output as expected
logAccessLogFile(c, traefikTestAccessLogFileRotated)
lineCount := verifyLogLines(c, traefikTestAccessLogFileRotated, 0, true)
c.Assert(lineCount, checker.GreaterOrEqualThan, 1)
s.logAccessLogFile(traefikTestAccessLogFileRotated)
lineCount := s.verifyLogLines(traefikTestAccessLogFileRotated, 0, true)
assert.GreaterOrEqual(s.T(), lineCount, 1)
// make sure that the access log file is at least created before we do assertions on it
err = try.Do(1*time.Second, func() error {
_, err := os.Stat(traefikTestAccessLogFile)
return err
})
c.Assert(err, checker.IsNil, check.Commentf("access log file was not created in time"))
assert.NoError(s.T(), err, "access log file was not created in time")
// Verify access.log output as expected
logAccessLogFile(c, traefikTestAccessLogFile)
lineCount = verifyLogLines(c, traefikTestAccessLogFile, lineCount, true)
c.Assert(lineCount, checker.Equals, 3)
s.logAccessLogFile(traefikTestAccessLogFile)
lineCount = s.verifyLogLines(traefikTestAccessLogFile, lineCount, true)
assert.Equal(s.T(), 3, lineCount)
verifyEmptyErrorLog(c, traefikTestLogFile)
s.verifyEmptyErrorLog(traefikTestLogFile)
}
func logAccessLogFile(c *check.C, fileName string) {
func (s *LogRotationSuite) logAccessLogFile(fileName string) {
output, err := os.ReadFile(fileName)
c.Assert(err, checker.IsNil)
c.Logf("Contents of file %s\n%s", fileName, string(output))
require.NoError(s.T(), err)
log.Info().Msgf("Contents of file %s\n%s", fileName, string(output))
}
func verifyEmptyErrorLog(c *check.C, name string) {
func (s *LogRotationSuite) verifyEmptyErrorLog(name string) {
err := try.Do(5*time.Second, func() error {
traefikLog, e2 := os.ReadFile(name)
if e2 != nil {
return e2
}
c.Assert(string(traefikLog), checker.HasLen, 0)
assert.Empty(s.T(), string(traefikLog))
return nil
})
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
}
func verifyLogLines(c *check.C, fileName string, countInit int, accessLog bool) int {
func (s *LogRotationSuite) verifyLogLines(fileName string, countInit int, accessLog bool) int {
rotated, err := os.Open(fileName)
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
rotatedLog := bufio.NewScanner(rotated)
count := countInit
for rotatedLog.Scan() {
@ -128,7 +136,7 @@ func verifyLogLines(c *check.C, fileName string, countInit int, accessLog bool)
if accessLog {
if len(line) > 0 {
if !strings.Contains(line, "/api/rawdata") {
CheckAccessLogFormat(c, line, count)
s.CheckAccessLogFormat(line, count)
count++
}
}

View file

@ -1,103 +1,138 @@
package integration
import (
"net/http"
"os"
"bufio"
"net"
"testing"
"time"
"github.com/go-check/check"
"github.com/pires/go-proxyproto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
type ProxyProtocolSuite struct {
BaseSuite
gatewayIP string
haproxyIP string
whoamiIP string
}
func (s *ProxyProtocolSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "proxy-protocol")
s.composeUp(c)
s.gatewayIP = s.getContainerIP(c, "traefik")
s.haproxyIP = s.getComposeServiceIP(c, "haproxy")
s.whoamiIP = s.getComposeServiceIP(c, "whoami")
func TestProxyProtocolSuite(t *testing.T) {
suite.Run(t, new(ProxyProtocolSuite))
}
func (s *ProxyProtocolSuite) TestProxyProtocolTrusted(c *check.C) {
file := s.adaptFile(c, "fixtures/proxy-protocol/with.toml", struct {
func (s *ProxyProtocolSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("proxy-protocol")
s.composeUp()
s.whoamiIP = s.getComposeServiceIP("whoami")
}
func (s *ProxyProtocolSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *ProxyProtocolSuite) TestProxyProtocolTrusted() {
file := s.adaptFile("fixtures/proxy-protocol/proxy-protocol.toml", struct {
HaproxyIP string
WhoamiIP string
}{HaproxyIP: s.haproxyIP, WhoamiIP: s.whoamiIP})
defer os.Remove(file)
}{WhoamiIP: s.whoamiIP})
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://"+s.haproxyIP+"/whoami", 1*time.Second,
try.StatusCodeIs(http.StatusOK),
try.BodyContains("X-Forwarded-For: "+s.gatewayIP))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8000/whoami", 10*time.Second)
require.NoError(s.T(), err)
content, err := proxyProtoRequest("127.0.0.1:8000", 1)
require.NoError(s.T(), err)
assert.Contains(s.T(), content, "X-Forwarded-For: 1.2.3.4")
content, err = proxyProtoRequest("127.0.0.1:8000", 2)
require.NoError(s.T(), err)
assert.Contains(s.T(), content, "X-Forwarded-For: 1.2.3.4")
}
func (s *ProxyProtocolSuite) TestProxyProtocolV2Trusted(c *check.C) {
file := s.adaptFile(c, "fixtures/proxy-protocol/with.toml", struct {
func (s *ProxyProtocolSuite) TestProxyProtocolNotTrusted() {
file := s.adaptFile("fixtures/proxy-protocol/proxy-protocol.toml", struct {
HaproxyIP string
WhoamiIP string
}{HaproxyIP: s.haproxyIP, WhoamiIP: s.whoamiIP})
defer os.Remove(file)
}{WhoamiIP: s.whoamiIP})
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://"+s.haproxyIP+":81/whoami", 1*time.Second,
try.StatusCodeIs(http.StatusOK),
try.BodyContains("X-Forwarded-For: "+s.gatewayIP))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:9000/whoami", 10*time.Second)
require.NoError(s.T(), err)
content, err := proxyProtoRequest("127.0.0.1:9000", 1)
require.NoError(s.T(), err)
assert.Contains(s.T(), content, "X-Forwarded-For: 127.0.0.1")
content, err = proxyProtoRequest("127.0.0.1:9000", 2)
require.NoError(s.T(), err)
assert.Contains(s.T(), content, "X-Forwarded-For: 127.0.0.1")
}
func (s *ProxyProtocolSuite) TestProxyProtocolNotTrusted(c *check.C) {
file := s.adaptFile(c, "fixtures/proxy-protocol/without.toml", struct {
HaproxyIP string
WhoamiIP string
}{HaproxyIP: s.haproxyIP, WhoamiIP: s.whoamiIP})
defer os.Remove(file)
func proxyProtoRequest(address string, version byte) (string, error) {
// Open a TCP connection to the server
conn, err := net.Dial("tcp", address)
if err != nil {
return "", err
}
defer conn.Close()
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
err = try.GetRequest("http://"+s.haproxyIP+"/whoami", 1*time.Second,
try.StatusCodeIs(http.StatusOK),
try.BodyContains("X-Forwarded-For: "+s.haproxyIP))
c.Assert(err, checker.IsNil)
// Create a Proxy Protocol header with v1
proxyHeader := &proxyproto.Header{
Version: version,
Command: proxyproto.PROXY,
TransportProtocol: proxyproto.TCPv4,
DestinationAddr: &net.TCPAddr{
IP: net.ParseIP("127.0.0.1"),
Port: 8000,
},
SourceAddr: &net.TCPAddr{
IP: net.ParseIP("1.2.3.4"),
Port: 62541,
},
}
func (s *ProxyProtocolSuite) TestProxyProtocolV2NotTrusted(c *check.C) {
file := s.adaptFile(c, "fixtures/proxy-protocol/without.toml", struct {
HaproxyIP string
WhoamiIP string
}{HaproxyIP: s.haproxyIP, WhoamiIP: s.whoamiIP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
err = try.GetRequest("http://"+s.haproxyIP+":81/whoami", 1*time.Second,
try.StatusCodeIs(http.StatusOK),
try.BodyContains("X-Forwarded-For: "+s.haproxyIP))
c.Assert(err, checker.IsNil)
// After the connection was created write the proxy headers first
_, err = proxyHeader.WriteTo(conn)
if err != nil {
return "", err
}
// Create an HTTP request
request := "GET /whoami HTTP/1.1\r\n" +
"Host: 127.0.0.1\r\n" +
"Connection: close\r\n" +
"\r\n"
// Write the HTTP request to the TCP connection
writer := bufio.NewWriter(conn)
_, err = writer.WriteString(request)
if err != nil {
return "", err
}
// Flush the buffer to ensure the request is sent
err = writer.Flush()
if err != nil {
return "", err
}
// Read the response from the server
var content string
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
content += scanner.Text() + "\n"
}
if scanner.Err() != nil {
return "", err
}
return content, nil
}
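For reference, the version 1 header that proxyProtoRequest writes before the HTTP request is a single ASCII line defined by the PROXY protocol spec. The sketch below renders it into a buffer with the same go-proxyproto library instead of a live connection (the helper name is hypothetical):

package integration

import (
	"bytes"
	"net"

	"github.com/pires/go-proxyproto"
)

// exampleProxyV1Preamble renders the v1 preamble the tests send ahead of the HTTP request.
func exampleProxyV1Preamble() string {
	h := &proxyproto.Header{
		Version:           1,
		Command:           proxyproto.PROXY,
		TransportProtocol: proxyproto.TCPv4,
		SourceAddr:        &net.TCPAddr{IP: net.ParseIP("1.2.3.4"), Port: 62541},
		DestinationAddr:   &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000},
	}

	var buf bytes.Buffer
	if _, err := h.WriteTo(&buf); err != nil {
		panic(err)
	}

	// Per the spec this yields: "PROXY TCP4 1.2.3.4 127.0.0.1 62541 8000\r\n"
	return buf.String()
}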

View file

@ -2,12 +2,12 @@ package integration
import (
"net/http"
"os"
"testing"
"time"
"github.com/go-check/check"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/traefik/traefik/v3/integration/try"
checker "github.com/vdemeester/shakers"
)
type RateLimitSuite struct {
@ -15,33 +15,38 @@ type RateLimitSuite struct {
ServerIP string
}
func (s *RateLimitSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, "ratelimit")
s.composeUp(c)
s.ServerIP = s.getComposeServiceIP(c, "whoami1")
func TestRateLimitSuite(t *testing.T) {
suite.Run(t, new(RateLimitSuite))
}
func (s *RateLimitSuite) TestSimpleConfiguration(c *check.C) {
file := s.adaptFile(c, "fixtures/ratelimit/simple.toml", struct {
func (s *RateLimitSuite) SetupSuite() {
s.BaseSuite.SetupSuite()
s.createComposeProject("ratelimit")
s.composeUp()
s.ServerIP = s.getComposeServiceIP("whoami1")
}
func (s *RateLimitSuite) TearDownSuite() {
s.BaseSuite.TearDownSuite()
}
func (s *RateLimitSuite) TestSimpleConfiguration() {
file := s.adaptFile("fixtures/ratelimit/simple.toml", struct {
Server1 string
}{s.ServerIP})
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer s.killCmd(cmd)
s.traefikCmd(withConfigFile(file))
err = try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("ratelimit"))
c.Assert(err, checker.IsNil)
err := try.GetRequest("http://127.0.0.1:8080/api/rawdata", 1*time.Second, try.BodyContains("ratelimit"))
require.NoError(s.T(), err)
start := time.Now()
count := 0
for {
err = try.GetRequest("http://127.0.0.1:8081/", 500*time.Millisecond, try.StatusCodeIs(http.StatusOK))
c.Assert(err, checker.IsNil)
require.NoError(s.T(), err)
count++
if count > 100 {
break
@ -50,6 +55,6 @@ func (s *RateLimitSuite) TestSimpleConfiguration(c *check.C) {
stop := time.Now()
elapsed := stop.Sub(start)
if elapsed < time.Second*99/100 {
c.Fatalf("requests throughput was too fast wrt to rate limiting: 100 requests in %v", elapsed)
s.T().Fatalf("requests throughput was too fast wrt to rate limiting: 100 requests in %v", elapsed)
}
}

Some files were not shown because too many files have changed in this diff.