Improve rate limiter tests

Co-authored-by: Julien Salleyron <julien.salleyron@gmail.com>
mpl 2019-09-09 20:02:04 +02:00 committed by Traefiker Bot
parent 25f4c23ab2
commit 2b828765e3


@@ -73,9 +73,11 @@ func TestNewRateLimiter(t *testing.T) {

 func TestRateLimit(t *testing.T) {
 	testCases := []struct {
-		desc     string
-		config   dynamic.RateLimit
-		reqCount int
+		desc         string
+		config       dynamic.RateLimit
+		loadDuration time.Duration
+		incomingLoad int // in reqs/s
+		burst        int
 	}{
 		{
 			desc: "Average is respected",
@@ -83,15 +85,47 @@ func TestRateLimit(t *testing.T) {
 				Average: 100,
 				Burst:   1,
 			},
-			reqCount: 200,
+			loadDuration: 2 * time.Second,
+			incomingLoad: 400,
 		},
 		{
-			desc: "Burst is taken into account",
+			desc: "burst allowed, no bursty traffic",
+			config: dynamic.RateLimit{
+				Average: 100,
+				Burst:   100,
+			},
+			loadDuration: 2 * time.Second,
+			incomingLoad: 200,
+		},
+		{
+			desc: "burst allowed, initial burst, under capacity",
+			config: dynamic.RateLimit{
+				Average: 100,
+				Burst:   100,
+			},
+			loadDuration: 2 * time.Second,
+			incomingLoad: 200,
+			burst:        50,
+		},
+		{
+			desc: "burst allowed, initial burst, over capacity",
+			config: dynamic.RateLimit{
+				Average: 100,
+				Burst:   100,
+			},
+			loadDuration: 2 * time.Second,
+			incomingLoad: 200,
+			burst:        150,
+		},
+		{
+			desc: "burst over average, initial burst, over capacity",
 			config: dynamic.RateLimit{
 				Average: 100,
 				Burst:   200,
 			},
-			reqCount: 300,
+			loadDuration: 2 * time.Second,
+			incomingLoad: 200,
+			burst:        300,
 		},
 		{
 			desc: "Zero average ==> no rate limiting",
@@ -99,26 +133,32 @@ func TestRateLimit(t *testing.T) {
 				Average: 0,
 				Burst:   1,
 			},
-			reqCount: 100,
+			incomingLoad: 1000,
+			loadDuration: time.Second,
 		},
 	}
 	for _, test := range testCases {
 		test := test
 		t.Run(test.desc, func(t *testing.T) {
 			t.Parallel()

 			reqCount := 0
+			dropped := 0
 			next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				reqCount++
 			})

 			h, err := New(context.Background(), next, test.config, "rate-limiter")
 			require.NoError(t, err)

+			period := time.Duration(1e9 / test.incomingLoad)
 			start := time.Now()
+			end := start.Add(test.loadDuration)
+			ticker := time.NewTicker(period)
+			defer ticker.Stop()
 			for {
-				if reqCount >= test.reqCount {
+				if time.Now().After(end) {
 					break
 				}
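
The pacing here converts the target rate into a per-request period: 1e9 nanoseconds in a second divided by incomingLoad, so 400 reqs/s means one tick every 2.5ms. A stripped-down sketch of that load generator (hit is a hypothetical stand-in for the h.ServeHTTP call in the test):

package main

import (
	"fmt"
	"time"
)

func main() {
	const incomingLoad = 400 // target rate, in reqs/s

	// 1e9 ns in a second, so the gap between requests is 1e9/400 ns = 2.5ms.
	period := time.Duration(1e9 / incomingLoad)
	fmt.Println(period) // 2.5ms

	hit := func() {} // hypothetical stand-in for h.ServeHTTP(w, req)

	// Fire one request per tick until the load window closes.
	end := time.Now().Add(100 * time.Millisecond)
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	sent := 0
	for !time.Now().After(end) {
		hit()
		sent++
		<-ticker.C
	}
	fmt.Println(sent, "requests sent") // ~40 over a 100ms window
}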
@@ -127,34 +167,45 @@ func TestRateLimit(t *testing.T) {
 				w := httptest.NewRecorder()

 				h.ServeHTTP(w, req)
-				// TODO(mpl): predict and count the 200 VS the 429?
+				if w.Result().StatusCode != http.StatusOK {
+					dropped++
+				}
+				if test.burst > 0 && reqCount < test.burst {
+					// if a burst is defined, we first hammer the server with test.burst requests as fast as possible
+					continue
+				}
+				<-ticker.C
 			}
 			stop := time.Now()
 			elapsed := stop.Sub(start)

 			if test.config.Average == 0 {
-				if elapsed > time.Millisecond {
-					t.Fatalf("rate should not have been limited, but: %d requests in %v", reqCount, elapsed)
+				if reqCount < 75*test.incomingLoad/100 {
+					t.Fatalf("we (arbitrarily) expect at least 75%% of the requests to go through with no rate limiting, and yet only %d/%d went through", reqCount, test.incomingLoad)
+				}
+				if dropped != 0 {
+					t.Fatalf("no request should have been dropped if rate limiting is disabled, and yet %d were", dropped)
 				}
 				return
 			}

-			// Assume allowed burst is initially consumed in an infinitesimal period of time
-			var expectedDuration time.Duration
-			if test.config.Average != 0 {
-				expectedDuration = time.Duration((int64(test.reqCount)-test.config.Burst+1)/test.config.Average) * time.Second
-			}
+			// Note that even when there is no bursty traffic,
+			// we take the configured burst into account,
+			// because it also helps absorb non-bursty traffic.
+			wantCount := int(test.config.Average*int64(test.loadDuration/time.Second) + test.config.Burst)

 			// Allow for a 2% leeway
-			minDuration := expectedDuration * 98 / 100
-			maxDuration := expectedDuration * 102 / 100
-
-			if elapsed < minDuration {
-				t.Fatalf("rate was faster than expected: %d requests in %v", reqCount, elapsed)
-			}
-			if elapsed > maxDuration {
+			maxCount := wantCount * 102 / 100
+			// With very high CPU loads we can expect some extra delay on top
+			// of the rate limiting we already do, so we allow for some extra
+			// leeway there. Feel free to adjust wrt the load on e.g. the CI.
+			minCount := wantCount * 95 / 100
+			if reqCount < minCount {
 				t.Fatalf("rate was slower than expected: %d requests in %v", reqCount, elapsed)
 			}
+			if reqCount > maxCount {
+				t.Fatalf("rate was faster than expected: %d requests in %v", reqCount, elapsed)
+			}
 		})
 	}
 }
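
The new assertions check a request-count window instead of an elapsed-time window, which is what makes the test tolerant of scheduling noise. For the "Average: 100, Burst: 100" cases over 2s of load, wantCount = 100*2 + 100 = 300, and the test passes if the observed count falls in [285, 306]; the lower bound is deliberately looser (5% vs 2%) because a loaded machine can only slow requests down, never speed them up. The arithmetic, mirroring the commit's formula:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors wantCount and its leeway bounds from the test above,
	// for Average: 100, Burst: 100, loadDuration: 2s.
	var (
		average      int64 = 100
		burst        int64 = 100
		loadDuration       = 2 * time.Second
	)
	wantCount := int(average*int64(loadDuration/time.Second) + burst)
	maxCount := wantCount * 102 / 100 // 2% leeway above the target
	minCount := wantCount * 95 / 100  // 5% below, to absorb CPU jitter
	fmt.Println(wantCount, minCount, maxCount) // 300 285 306
}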