package ratelimiter

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	ptypes "github.com/traefik/paerser/types"
	"github.com/traefik/traefik/v2/pkg/config/dynamic"
	"github.com/traefik/traefik/v2/pkg/testhelpers"
	"github.com/vulcand/oxy/v2/utils"
)

func TestNewRateLimiter(t *testing.T) {
	testCases := []struct {
		desc             string
		config           dynamic.RateLimit
		expectedMaxDelay time.Duration
		expectedSourceIP string
		requestHeader    string
		expectedError    string
	}{
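		// NOTE: 2500µs is half of the 5ms emission interval at 200 req/s;
		// the low-rate case below appears to expect a flat 500ms cap instead.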
		{
			desc: "maxDelay computation",
			config: dynamic.RateLimit{
				Average: 200,
				Burst:   10,
			},
			expectedMaxDelay: 2500 * time.Microsecond,
		},
		{
			desc: "maxDelay computation, low rate regime",
			config: dynamic.RateLimit{
				Average: 2,
				Period:  ptypes.Duration(10 * time.Second),
				Burst:   10,
			},
			expectedMaxDelay: 500 * time.Millisecond,
		},
		{
			desc: "default SourceMatcher is remote address ip strategy",
			config: dynamic.RateLimit{
				Average: 200,
				Burst:   10,
			},
			expectedSourceIP: "127.0.0.1",
		},
		{
			desc: "SourceCriterion in config is respected",
			config: dynamic.RateLimit{
				Average: 200,
				Burst:   10,
				SourceCriterion: &dynamic.SourceCriterion{
					RequestHeaderName: "Foo",
				},
			},
			requestHeader: "bar",
		},
		{
			desc: "SourceCriteria are mutually exclusive",
			config: dynamic.RateLimit{
				Average: 200,
				Burst:   10,
				SourceCriterion: &dynamic.SourceCriterion{
					IPStrategy:        &dynamic.IPStrategy{},
					RequestHeaderName: "Foo",
				},
			},
			expectedError: "iPStrategy and RequestHeaderName are mutually exclusive",
		},
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			t.Parallel()

			next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})

			h, err := New(context.Background(), next, test.config, "rate-limiter")
			if test.expectedError != "" {
				assert.EqualError(t, err, test.expectedError)
			} else {
				require.NoError(t, err)
			}

			rtl, _ := h.(*rateLimiter)
			if test.expectedMaxDelay != 0 {
				assert.Equal(t, test.expectedMaxDelay, rtl.maxDelay)
			}

			if test.expectedSourceIP != "" {
				extractor, ok := rtl.sourceMatcher.(utils.ExtractorFunc)
				require.True(t, ok, "Not an ExtractorFunc")

				req := http.Request{
					RemoteAddr: fmt.Sprintf("%s:1234", test.expectedSourceIP),
				}

				ip, _, err := extractor(&req)
				assert.NoError(t, err)
				assert.Equal(t, test.expectedSourceIP, ip)
			}

			if test.requestHeader != "" {
				extractor, ok := rtl.sourceMatcher.(utils.ExtractorFunc)
				require.True(t, ok, "Not an ExtractorFunc")

				req := http.Request{
					Header: map[string][]string{
						test.config.SourceCriterion.RequestHeaderName: {test.requestHeader},
					},
				}

				hd, _, err := extractor(&req)
				assert.NoError(t, err)
				assert.Equal(t, test.requestHeader, hd)
			}
		})
	}
}

func TestRateLimit(t *testing.T) {
	testCases := []struct {
		desc         string
		config       dynamic.RateLimit
		loadDuration time.Duration
		incomingLoad int // in reqs/s
		burst        int
	}{
		{
			desc: "Average is respected",
			config: dynamic.RateLimit{
				Average: 100,
				Burst:   1,
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 400,
		},
		{
			desc: "burst allowed, no bursty traffic",
			config: dynamic.RateLimit{
				Average: 100,
				Burst:   100,
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 200,
		},
		{
			desc: "burst allowed, initial burst, under capacity",
			config: dynamic.RateLimit{
				Average: 100,
				Burst:   100,
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 200,
			burst:        50,
		},
		{
			desc: "burst allowed, initial burst, over capacity",
			config: dynamic.RateLimit{
				Average: 100,
				Burst:   100,
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 200,
			burst:        150,
		},
		{
			desc: "burst over average, initial burst, over capacity",
			config: dynamic.RateLimit{
				Average: 100,
				Burst:   200,
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 200,
			burst:        300,
		},
		{
			desc: "lower than 1/s",
			config: dynamic.RateLimit{
				Average: 5,
				Period:  ptypes.Duration(10 * time.Second),
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 100,
			burst:        0,
		},
		{
			desc: "lower than 1/s, longer",
			config: dynamic.RateLimit{
				Average: 5,
				Period:  ptypes.Duration(10 * time.Second),
			},
			loadDuration: time.Minute,
			incomingLoad: 100,
			burst:        0,
		},
		{
			desc: "lower than 1/s, longer, harsher",
			config: dynamic.RateLimit{
				Average: 1,
				Period:  ptypes.Duration(time.Minute),
			},
			loadDuration: time.Minute,
			incomingLoad: 100,
			burst:        0,
		},
		{
			desc: "period below 1 second",
			config: dynamic.RateLimit{
				Average: 50,
				Period:  ptypes.Duration(500 * time.Millisecond),
			},
			loadDuration: 2 * time.Second,
			incomingLoad: 300,
			burst:        0,
		},
		// TODO Try to disambiguate, when it fails, whether it is because of too high a load.
		// {
		// 	desc: "Zero average ==> no rate limiting",
		// 	config: dynamic.RateLimit{
		// 		Average: 0,
		// 		Burst:   1,
		// 	},
		// 	incomingLoad: 1000,
		// 	loadDuration: time.Second,
		// },
	}

	for _, test := range testCases {
		test := test
		t.Run(test.desc, func(t *testing.T) {
			if test.loadDuration >= time.Minute && testing.Short() {
				t.Skip("skipping test in short mode.")
			}

			t.Parallel()

			reqCount := 0
			dropped := 0
			next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				reqCount++
			})

			h, err := New(context.Background(), next, test.config, "rate-limiter")
			require.NoError(t, err)
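
			// loadPeriod is the gap between two requests needed to produce
			// incomingLoad requests per second (1e9 ns / reqs per second).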
			loadPeriod := time.Duration(1e9 / test.incomingLoad)
			start := time.Now()
			end := start.Add(test.loadDuration)

			ticker := time.NewTicker(loadPeriod)
			defer ticker.Stop()
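
			// Generate load at the target rate until loadDuration has elapsed,
			// counting how many requests reach the next handler (reqCount) and
			// how many get a non-OK status (dropped).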
			for {
				if time.Now().After(end) {
					break
				}

				req := testhelpers.MustNewRequest(http.MethodGet, "http://localhost", nil)
				req.RemoteAddr = "127.0.0.1:1234"
				w := httptest.NewRecorder()

				h.ServeHTTP(w, req)
				if w.Result().StatusCode != http.StatusOK {
					dropped++
				}

				if test.burst > 0 && reqCount < test.burst {
					// if a burst is defined we first hammer the server with test.burst requests as fast as possible
					continue
				}

				<-ticker.C
			}
			stop := time.Now()
			elapsed := stop.Sub(start)

			burst := test.config.Burst
			if burst < 1 {
				// actual default value
				burst = 1
			}

			period := time.Duration(test.config.Period)
			if period == 0 {
				period = time.Second
			}
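
			// Average == 0 means no rate limiting: in that case we only check
			// that (almost) all requests went through and that none were dropped.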
			if test.config.Average == 0 {
				if reqCount < 75*test.incomingLoad/100 {
					t.Fatalf("we (arbitrarily) expect at least 75%% of the requests to go through with no rate limiting, and yet only %d/%d went through", reqCount, test.incomingLoad)
				}
				if dropped != 0 {
					t.Fatalf("no request should have been dropped if rate limiting is disabled, and yet %d were", dropped)
				}
				return
			}

			// Note that even when there is no bursty traffic,
			// we take into account the configured burst,
			// because it also helps absorb non-bursty traffic.
			rate := float64(test.config.Average) / float64(period)
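
			// period is a time.Duration (nanoseconds), so rate is expressed in
			// requests per nanosecond; multiplying it by loadDuration (also in
			// nanoseconds) gives the number of requests expected during the load.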
			wantCount := int(int64(rate*float64(test.loadDuration)) + burst)

			// Allow for a 2% leeway.
			maxCount := wantCount * 102 / 100

			// With very high CPU loads,
			// we can expect some extra delay in addition to the rate limiting we already do,
			// so we allow for some extra leeway there.
			// Feel free to adjust w.r.t. the load on e.g. the CI.
			minCount := computeMinCount(wantCount)

			if reqCount < minCount {
				t.Fatalf("rate was slower than expected: %d requests (wanted > %d) in %v", reqCount, minCount, elapsed)
			}
			if reqCount > maxCount {
				t.Fatalf("rate was faster than expected: %d requests (wanted < %d) in %v", reqCount, maxCount, elapsed)
			}
		})
	}
}
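
// computeMinCount returns the lower bound on the accepted request count for
// TestRateLimit. Requests can be slowed down by factors other than the rate
// limiter itself, so a bigger deficit is tolerated on CI (40%) than locally (5%).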
func computeMinCount(wantCount int) int {
	if os.Getenv("CI") != "" {
		return wantCount * 60 / 100
	}

	return wantCount * 95 / 100
}