From 2b828765e3d679c116bf69ad43eaab6971fb199a Mon Sep 17 00:00:00 2001 From: mpl Date: Mon, 9 Sep 2019 20:02:04 +0200 Subject: [PATCH] Improve rate limiter tests Co-authored-by: Julien Salleyron --- .../ratelimiter/rate_limiter_test.go | 103 +++++++++++++----- 1 file changed, 77 insertions(+), 26 deletions(-) diff --git a/pkg/middlewares/ratelimiter/rate_limiter_test.go b/pkg/middlewares/ratelimiter/rate_limiter_test.go index 94fcedba8..6273ca356 100644 --- a/pkg/middlewares/ratelimiter/rate_limiter_test.go +++ b/pkg/middlewares/ratelimiter/rate_limiter_test.go @@ -73,9 +73,11 @@ func TestNewRateLimiter(t *testing.T) { func TestRateLimit(t *testing.T) { testCases := []struct { - desc string - config dynamic.RateLimit - reqCount int + desc string + config dynamic.RateLimit + loadDuration time.Duration + incomingLoad int // in reqs/s + burst int }{ { desc: "Average is respected", @@ -83,15 +85,47 @@ func TestRateLimit(t *testing.T) { Average: 100, Burst: 1, }, - reqCount: 200, + loadDuration: 2 * time.Second, + incomingLoad: 400, }, { - desc: "Burst is taken into account", + desc: "burst allowed, no bursty traffic", + config: dynamic.RateLimit{ + Average: 100, + Burst: 100, + }, + loadDuration: 2 * time.Second, + incomingLoad: 200, + }, + { + desc: "burst allowed, initial burst, under capacity", + config: dynamic.RateLimit{ + Average: 100, + Burst: 100, + }, + loadDuration: 2 * time.Second, + incomingLoad: 200, + burst: 50, + }, + { + desc: "burst allowed, initial burst, over capacity", + config: dynamic.RateLimit{ + Average: 100, + Burst: 100, + }, + loadDuration: 2 * time.Second, + incomingLoad: 200, + burst: 150, + }, + { + desc: "burst over average, initial burst, over capacity", config: dynamic.RateLimit{ Average: 100, Burst: 200, }, - reqCount: 300, + loadDuration: 2 * time.Second, + incomingLoad: 200, + burst: 300, }, { desc: "Zero average ==> no rate limiting", @@ -99,26 +133,32 @@ func TestRateLimit(t *testing.T) { Average: 0, Burst: 1, }, - reqCount: 100, + 
incomingLoad: 1000, + loadDuration: time.Second, }, } for _, test := range testCases { test := test t.Run(test.desc, func(t *testing.T) { + t.Parallel() reqCount := 0 + dropped := 0 next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { reqCount++ }) - h, err := New(context.Background(), next, test.config, "rate-limiter") require.NoError(t, err) + period := time.Duration(1e9 / test.incomingLoad) start := time.Now() + end := start.Add(test.loadDuration) + ticker := time.NewTicker(period) + defer ticker.Stop() for { - if reqCount >= test.reqCount { + if time.Now().After(end) { break } @@ -127,34 +167,45 @@ func TestRateLimit(t *testing.T) { w := httptest.NewRecorder() h.ServeHTTP(w, req) - // TODO(mpl): predict and count the 200 VS the 429? + if w.Result().StatusCode != http.StatusOK { + dropped++ + } + if test.burst > 0 && reqCount < test.burst { + // if a burst is defined we first hammer the server with test.burst requests as fast as possible + continue + } + <-ticker.C } - stop := time.Now() elapsed := stop.Sub(start) + if test.config.Average == 0 { - if elapsed > time.Millisecond { - t.Fatalf("rate should not have been limited, but: %d requests in %v", reqCount, elapsed) + if reqCount < 75*test.incomingLoad/100 { + t.Fatalf("we (arbitrarily) expect at least 75%% of the requests to go through with no rate limiting, and yet only %d/%d went through", reqCount, test.incomingLoad) + } + if dropped != 0 { + t.Fatalf("no request should have been dropped if rate limiting is disabled, and yet %d were", dropped) } return } - // Assume allowed burst is initially consumed in an infinitesimal period of time - var expectedDuration time.Duration - if test.config.Average != 0 { - expectedDuration = time.Duration((int64(test.reqCount)-test.config.Burst+1)/test.config.Average) * time.Second - } - + // Note that even when there is no bursty traffic, + // we take into account the configured burst, + // because it also helps absorbing non-bursty traffic. 
+ wantCount := int(test.config.Average*int64(test.loadDuration/time.Second) + test.config.Burst) // Allow for a 2% leeway - minDuration := expectedDuration * 98 / 100 - maxDuration := expectedDuration * 102 / 100 - - if elapsed < minDuration { - t.Fatalf("rate was faster than expected: %d requests in %v", reqCount, elapsed) - } - if elapsed > maxDuration { + maxCount := wantCount * 102 / 100 + // With very high CPU loads, + // we can expect some extra delay in addition to the rate limiting we already do, + // so we allow for some extra leeway there. + // Feel free to adjust wrt the load on e.g. the CI. + minCount := wantCount * 95 / 100 + if reqCount < minCount { t.Fatalf("rate was slower than expected: %d requests in %v", reqCount, elapsed) } + if reqCount > maxCount { t.Fatalf("rate was faster than expected: %d requests in %v", reqCount, elapsed) + } }) } }