From d9fc7750841e4a17b2f6f3bc72bb044cc390b339 Mon Sep 17 00:00:00 2001
From: mpl
Date: Mon, 26 Jul 2021 17:20:27 +0200
Subject: [PATCH 1/4] ratelimiter: use correct ttlSeconds value, and always call Set

Co-authored-by: Romain
Co-authored-by: Daniel Tomcej
---
 pkg/middlewares/ratelimiter/rate_limiter.go | 44 +++++++++++++++++----
 1 file changed, 36 insertions(+), 8 deletions(-)

diff --git a/pkg/middlewares/ratelimiter/rate_limiter.go b/pkg/middlewares/ratelimiter/rate_limiter.go
index 96be92178..7992c7d48 100644
--- a/pkg/middlewares/ratelimiter/rate_limiter.go
+++ b/pkg/middlewares/ratelimiter/rate_limiter.go
@@ -30,7 +30,12 @@ type rateLimiter struct {
 	burst int64
 	// maxDelay is the maximum duration we're willing to wait for a bucket reservation to become effective, in nanoseconds.
 	// For now it is somewhat arbitrarily set to 1/(2*rate).
-	maxDelay      time.Duration
+	maxDelay time.Duration
+	// each rate limiter for a given source is stored in the buckets ttlmap.
+	// To keep this ttlmap constrained in size,
+	// each ratelimiter is "garbage collected" when it is considered expired.
+	// It is considered expired after it hasn't been used for ttl seconds.
+	ttl           int
 	sourceMatcher utils.SourceExtractor
 	next          http.Handler
 
@@ -66,12 +71,15 @@ func New(ctx context.Context, next http.Handler, config dynamic.RateLimit, name
 	}
 
 	period := time.Duration(config.Period)
+	if period < 0 {
+		return nil, fmt.Errorf("negative value not valid for period: %v", period)
+	}
 	if period == 0 {
 		period = time.Second
 	}
 
-	// Logically, we should set maxDelay to infinity when config.Average == 0 (because it means no rate limiting),
-	// but since the reservation will give us a delay = 0 anyway in this case, we're good even with any maxDelay >= 0.
+	// When config.Average == 0, the value of maxDelay does not matter,
+	// since the reservation will (buggily) give us a delay of 0 anyway.
 	var maxDelay time.Duration
 	var rtl float64
 	if config.Average > 0 {
@@ -86,6 +94,17 @@ func New(ctx context.Context, next http.Handler, config dynamic.RateLimit, name
 		}
 	}
 
+	// Make the ttl inversely proportional to how often a rate limiter is supposed to see any activity (when maxed out),
+	// for low-rate limiters.
+	// Otherwise just make it a second for all the high-rate limiters.
+	// Add an extra second in both cases for continuity between the two cases.
+	ttl := 1
+	if rtl >= 1 {
+		ttl++
+	} else if rtl > 0 {
+		ttl += int(1 / rtl)
+	}
+
 	return &rateLimiter{
 		name:          name,
 		rate:          rate.Limit(rtl),
@@ -94,6 +113,7 @@ func New(ctx context.Context, next http.Handler, config dynamic.RateLimit, name
 		next:          next,
 		sourceMatcher: sourceMatcher,
 		buckets:       buckets,
+		ttl:           ttl,
 	}, nil
 }
 
@@ -121,13 +141,21 @@ func (rl *rateLimiter) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		bucket = rlSource.(*rate.Limiter)
 	} else {
 		bucket = rate.NewLimiter(rl.rate, int(rl.burst))
-		if err := rl.buckets.Set(source, bucket, int(rl.maxDelay)*10+1); err != nil {
-			logger.Errorf("could not insert bucket: %v", err)
-			http.Error(w, "could not insert bucket", http.StatusInternalServerError)
-			return
-		}
 	}
 
+	// We Set even in the case where the source already exists,
+	// because we want to update the expiryTime every time we get the source,
+	// as the expiryTime is supposed to reflect the activity (or lack thereof) on that source.
+	if err := rl.buckets.Set(source, bucket, rl.ttl); err != nil {
+		logger.Errorf("could not insert/update bucket: %v", err)
+		http.Error(w, "could not insert/update bucket", http.StatusInternalServerError)
+		return
+	}
+
+	// time/rate is bugged, since a rate.Limiter with a 0 Limit not only allows a Reservation to take place,
+	// but also gives a 0 delay below (because of a division by zero, followed by a multiplication that flips into the negatives),
+	// regardless of the current load.
+	// However, for now we take advantage of this behavior to provide the no-limit ratelimiter when config.Average is 0.
 	res := bucket.Reserve()
 	if !res.OK() {
 		http.Error(w, "No bursty traffic allowed", http.StatusTooManyRequests)
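For context, the following is a minimal standalone sketch of the TTL heuristic introduced in this commit. It is not part of the patch: the helper name computeTTL and the example values (an average of 6 requests over a 1-minute period) are hypothetical, and only the arithmetic mirrors the diff above.

package main

import (
	"fmt"
	"time"
)

// computeTTL reproduces the heuristic from New() above (the helper name is hypothetical):
// low-rate limiters are kept for roughly 1/rate seconds plus one,
// while limiters at one request per second or more are kept for two seconds,
// so an idle source's bucket eventually expires from the ttlmap.
func computeTTL(rtl float64) int {
	ttl := 1
	if rtl >= 1 {
		ttl++
	} else if rtl > 0 {
		ttl += int(1 / rtl)
	}
	return ttl
}

func main() {
	// Assumed example configuration: 6 requests allowed per 1-minute period,
	// i.e. an effective rate of 0.1 req/s, computed the same way as in New().
	average := int64(6)
	period := time.Minute

	rtl := float64(average*int64(time.Second)) / float64(period)
	fmt.Printf("rate=%.2f req/s -> ttl=%d seconds\n", rtl, computeTTL(rtl)) // rate=0.10 req/s -> ttl=11 seconds
}

In the patch itself, this ttl is then passed to buckets.Set on every request, so a source's expiry time is refreshed each time that source is seen, and only genuinely idle sources are garbage collected.
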
From 8be434aaadfcb14270657980f7ffffa0ebca420b Mon Sep 17 00:00:00 2001
From: Jean-Baptiste Doumenjou <925513+jbdoumenjou@users.noreply.github.com>
Date: Mon, 26 Jul 2021 18:08:09 +0200
Subject: [PATCH 2/4] Prepare release v2.4.12

---
 CHANGELOG.md                   | 9 +++++++++
 script/gcg/traefik-bugfix.toml | 6 +++---
 2 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8da39faff..38704c0db 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,12 @@
+## [v2.4.12](https://github.com/traefik/traefik/tree/v2.4.12) (2021-07-26)
+[All Commits](https://github.com/traefik/traefik/compare/v2.4.11...v2.4.12)
+
+**Bug fixes:**
+- **[k8s,k8s/ingress]** Get Kubernetes server version early ([#8286](https://github.com/traefik/traefik/pull/8286) by [rtribotte](https://github.com/rtribotte))
+- **[k8s,k8s/ingress]** Don't remove ingress config on API call failure ([#8185](https://github.com/traefik/traefik/pull/8185) by [dtomcej](https://github.com/dtomcej))
+- **[middleware]** Ratelimiter: use correct ttlSeconds value, and always call Set ([#8254](https://github.com/traefik/traefik/pull/8254) by [mpl](https://github.com/mpl))
+- **[tls]** Check if defaultcertificate is defined in store ([#8274](https://github.com/traefik/traefik/pull/8274) by [dtomcej](https://github.com/dtomcej))
+
 ## [v2.4.11](https://github.com/traefik/traefik/tree/v2.4.11) (2021-07-15)
 [All Commits](https://github.com/traefik/traefik/compare/v2.4.9...v2.4.11)
 
diff --git a/script/gcg/traefik-bugfix.toml b/script/gcg/traefik-bugfix.toml
index 4d1cc91d3..0e924afaa 100644
--- a/script/gcg/traefik-bugfix.toml
+++ b/script/gcg/traefik-bugfix.toml
@@ -4,11 +4,11 @@ RepositoryName = "traefik"
 OutputType = "file"
 FileName = "traefik_changelog.md"
 
-# example new bugfix v2.4.11
+# example new bugfix v2.4.12
 CurrentRef = "v2.4"
-PreviousRef = "v2.4.9"
+PreviousRef = "v2.4.11"
 BaseBranch = "v2.4"
-FutureCurrentRefName = "v2.4.11"
+FutureCurrentRefName = "v2.4.12"
 
 ThresholdPreviousRef = 10
 ThresholdCurrentRef = 10

From a48a8a97a1b418faca9f3e4928e6fb8ae8b2fcd6 Mon Sep 17 00:00:00 2001
From: Michael
Date: Tue, 27 Jul 2021 19:16:06 +0200
Subject: [PATCH 3/4] fix: restore cache only once

---
 .semaphore/semaphore.yml              | 1 -
 integration/resources/compose/k8s.yml | 3 ++-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.semaphore/semaphore.yml b/.semaphore/semaphore.yml
index 58534df13..058f375da 100644
--- a/.semaphore/semaphore.yml
+++ b/.semaphore/semaphore.yml
@@ -61,7 +61,6 @@ blocks:
       jobs:
         - name: Test Integration Host
           commands:
-            - cache restore traefik-$(checksum go.sum)
            - mkdir -p static # Avoid to generate webui
            - make test-integration-host
      epilogue:
diff --git a/integration/resources/compose/k8s.yml b/integration/resources/compose/k8s.yml
index 83f313c29..1e823fe11 100644
--- a/integration/resources/compose/k8s.yml
+++ b/integration/resources/compose/k8s.yml
@@ -1,6 +1,6 @@
 server:
   image: rancher/k3s:v1.17.2-k3s1
-  command: server --disable-agent --no-deploy coredns --no-deploy servicelb --no-deploy traefik --no-deploy local-storage --no-deploy metrics-server --log /output/k3s.log
+  command: server --disable-agent --no-deploy coredns --no-deploy servicelb --no-deploy traefik --no-deploy local-storage --no-deploy metrics-server --log /output/k3s.log --kube-proxy-arg=conntrack-max-per-core=0 --kube-proxy-arg=conntrack-max-per-core=0
   environment:
     - K3S_CLUSTER_SECRET=somethingtotallyrandom
     - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
@@ -14,6 +14,7 @@ server:
 node:
   image: rancher/k3s:v1.17.2-k3s1
   privileged: true
+  command: agent --kube-proxy-arg=conntrack-max-per-core=0 --kube-proxy-arg=conntrack-max-per-core=0
   links:
     - server
   environment:
From 319e3065f0405f18a1d8b35495f41d35b5068256 Mon Sep 17 00:00:00 2001
From: Michael
Date: Wed, 28 Jul 2021 14:28:11 +0200
Subject: [PATCH 4/4] fix: upgrade k3s version

---
 integration/resources/compose/k8s.yml | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/integration/resources/compose/k8s.yml b/integration/resources/compose/k8s.yml
index 1e823fe11..b7b970f79 100644
--- a/integration/resources/compose/k8s.yml
+++ b/integration/resources/compose/k8s.yml
@@ -1,6 +1,6 @@
 server:
-  image: rancher/k3s:v1.17.2-k3s1
-  command: server --disable-agent --no-deploy coredns --no-deploy servicelb --no-deploy traefik --no-deploy local-storage --no-deploy metrics-server --log /output/k3s.log --kube-proxy-arg=conntrack-max-per-core=0 --kube-proxy-arg=conntrack-max-per-core=0
+  image: rancher/k3s:v1.18.20-k3s1
+  command: server --disable-agent --no-deploy coredns --no-deploy servicelb --no-deploy traefik --no-deploy local-storage --no-deploy metrics-server --log /output/k3s.log
   environment:
     - K3S_CLUSTER_SECRET=somethingtotallyrandom
     - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
@@ -12,9 +12,8 @@ server:
     - 6443:6443
 
 node:
-  image: rancher/k3s:v1.17.2-k3s1
+  image: rancher/k3s:v1.18.20-k3s1
   privileged: true
-  command: agent --kube-proxy-arg=conntrack-max-per-core=0 --kube-proxy-arg=conntrack-max-per-core=0
   links:
     - server
   environment: