Fix flaky tests.
Co-authored-by: Ludovic Fernandez <ldez@users.noreply.github.com>
parent 71ca237478
commit 3c8675bb8b

5 changed files with 33 additions and 15 deletions

@@ -30,7 +30,7 @@ func TestScalableHistogram(t *testing.T) {
 	measuredDuration, err := time.ParseDuration(extractedDurationString[0] + "ms")
 	assert.NoError(t, err)

-	assert.InDelta(t, 500*time.Millisecond, measuredDuration, float64(1*time.Millisecond))
+	assert.InDelta(t, 500*time.Millisecond, measuredDuration, float64(15*time.Millisecond))
 }

 func TestNewMultiRegistry(t *testing.T) {

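For context on the numbers: assert.InDelta converts both arguments to float64, and a time.Duration is an int64 nanosecond count, so the assertion compares the measured value against 5e8 with the delta as margin. A 1ms margin is routinely exceeded by scheduler jitter on a shared runner, while 15ms absorbs it. Below is a minimal, runnable sketch of that comparison; withinDelta is a hypothetical helper mirroring what testify's assert.InDelta computes, not part of the commit.

package main

import (
	"fmt"
	"time"
)

// withinDelta mirrors the comparison assert.InDelta performs once both
// arguments are converted to float64: a time.Duration is an int64
// nanosecond count, so 500ms becomes 5e8 and 15ms becomes 1.5e7.
func withinDelta(expected, actual time.Duration, delta float64) bool {
	diff := float64(expected - actual)
	if diff < 0 {
		diff = -diff
	}
	return diff <= delta
}

func main() {
	measured := 509 * time.Millisecond // plausible jitter on a loaded runner
	fmt.Println(withinDelta(500*time.Millisecond, measured, float64(1*time.Millisecond)))  // false: the old delta flakes
	fmt.Println(withinDelta(500*time.Millisecond, measured, float64(15*time.Millisecond))) // true: the new delta absorbs it
}
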
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"net/http"
 	"net/http/httptest"
+	"os"
 	"testing"
 	"time"

@@ -279,10 +280,12 @@ func TestRateLimit(t *testing.T) {
 				// actual default value
 				burst = 1
 			}
+
 			period := time.Duration(test.config.Period)
 			if period == 0 {
 				period = time.Second
 			}
+
 			if test.config.Average == 0 {
 				if reqCount < 75*test.incomingLoad/100 {
 					t.Fatalf("we (arbitrarily) expect at least 75%% of the requests to go through with no rate limiting, and yet only %d/%d went through", reqCount, test.incomingLoad)

@@ -297,14 +300,18 @@ func TestRateLimit(t *testing.T) {
 			// we take into account the configured burst,
 			// because it also helps absorbing non-bursty traffic.
 			rate := float64(test.config.Average) / float64(period)
+
 			wantCount := int(int64(rate*float64(test.loadDuration)) + burst)
+
 			// Allow for a 2% leeway
 			maxCount := wantCount * 102 / 100
+
 			// With very high CPU loads,
 			// we can expect some extra delay in addition to the rate limiting we already do,
 			// so we allow for some extra leeway there.
 			// Feel free to adjust wrt to the load on e.g. the CI.
-			minCount := wantCount * 95 / 100
+			minCount := computeMinCount(wantCount)
+
 			if reqCount < minCount {
 				t.Fatalf("rate was slower than expected: %d requests (wanted > %d) in %v", reqCount, minCount, elapsed)
 			}

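To make the arithmetic concrete (illustrative numbers, not a case from the test's table): period is a time.Duration, so rate is a count per nanosecond. With Average = 100 per 1s period, loadDuration = 2s and burst = 1, rate = 100/1e9, wantCount = int(100/1e9 * 2e9) + 1 = 201, and maxCount = 201 * 102 / 100 = 205 under integer division.
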
@@ -314,3 +321,11 @@ func TestRateLimit(t *testing.T) {
 		})
 	}
 }
+
+func computeMinCount(wantCount int) int {
+	if os.Getenv("CI") != "" {
+		return wantCount * 60 / 100
+	}
+
+	return wantCount * 95 / 100
+}

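computeMinCount makes only the lower bound environment-sensitive: CI providers conventionally export a non-empty CI variable, so continuing the example above, wantCount = 201 yields minCount = 201 * 95 / 100 = 190 locally (5% leeway) but 201 * 60 / 100 = 120 on CI (40% leeway), since an overloaded shared runner can legitimately deliver far fewer requests than the nominal rate. The 2% upper bound is unchanged: a slow machine under-delivers, it does not over-deliver.
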
@@ -51,7 +51,8 @@ func requireReceivedMessageFromProviders(t *testing.T, cfgCh <-chan dynamic.Message, names ...string) {

 	for range names {
 		select {
-		case <-time.After(10 * time.Millisecond):
+		case <-time.After(100 * time.Millisecond):
+			require.Fail(t, "Timeout while waiting for configuration.")
 		case msg = <-cfgCh:
 			receivedMessagesFrom = append(receivedMessagesFrom, msg.ProviderName)
 		}

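Two things change in this select: the timeout grows from 10ms to 100ms, and the timeout arm, previously empty, now fails the test immediately. With an empty arm, a late message was silently skipped and the problem only surfaced in whatever assertion inspects receivedMessagesFrom afterwards; failing inside the select pinpoints the flake at its source.
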
@@ -216,7 +216,7 @@ func TestListenProvidersDoesNotSkipFlappingConfiguration(t *testing.T) {
 	defer watcher.Stop()

 	// give some time so that the configuration can be processed
-	time.Sleep(40 * time.Millisecond)
+	time.Sleep(100 * time.Millisecond)

 	expected := dynamic.Configuration{
 		HTTP: th.BuildConfiguration(

@@ -70,6 +70,8 @@ func testShutdown(t *testing.T, router *tcp.Router) {

 	epConfig.LifeCycle.RequestAcceptGraceTimeout = 0
 	epConfig.LifeCycle.GraceTimeOut = ptypes.Duration(5 * time.Second)
+	epConfig.RespondingTimeouts.ReadTimeout = ptypes.Duration(5 * time.Second)
+	epConfig.RespondingTimeouts.WriteTimeout = ptypes.Duration(5 * time.Second)

 	entryPoint, err := NewTCPEntryPoint(context.Background(), &static.EntryPoint{
 		// We explicitly use an IPV4 address because on Alpine, with an IPV6 address

@@ -97,6 +99,11 @@ func testShutdown(t *testing.T, router *tcp.Router) {
 	err = request.Write(conn)
 	require.NoError(t, err)

+	reader := bufio.NewReader(conn)
+	// Wait for first byte in response.
+	_, err = reader.Peek(1)
+	require.NoError(t, err)
+
 	go entryPoint.Shutdown(context.Background())

 	// Make sure that new connections are not permitted anymore.

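The Peek call is the substance of this fix: bufio.Reader.Peek(1) blocks until at least one byte is available (or the read fails) and returns it without consuming it. Shutdown is therefore only started once the server has demonstrably begun writing the response, instead of racing the handler.
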
@@ -123,7 +130,7 @@ func testShutdown(t *testing.T, router *tcp.Router) {

 	// And make sure that the connection we had opened before shutting things down is still operational

-	resp, err := http.ReadResponse(bufio.NewReader(conn), request)
+	resp, err := http.ReadResponse(reader, request)
 	require.NoError(t, err)
 	assert.Equal(t, http.StatusOK, resp.StatusCode)
 }

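Passing reader here instead of building a second bufio.NewReader(conn) is not cosmetic: the byte obtained by Peek(1) sits in reader's buffer, so a fresh wrapper would miss whatever reader already buffered (at least that first byte) and http.ReadResponse would choke on the truncated status line. A self-contained sketch of that buffering behaviour, with a string reader standing in for the network connection:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Peek reports bytes without consuming them: they remain in THIS
	// reader's buffer, invisible to anyone reading the underlying
	// stream through a different wrapper.
	src := strings.NewReader("HTTP/1.1 200 OK\r\n")
	reader := bufio.NewReader(src)

	first, _ := reader.Peek(1)
	fmt.Printf("peeked: %q\n", first) // "H"

	// A second bufio.Reader on the same source starts AFTER whatever
	// the first one buffered, here losing the whole buffered chunk.
	other := bufio.NewReader(src)
	_, err := other.Peek(1)
	fmt.Println(err) // EOF: the first reader already drained the source

	line, _ := reader.ReadString('\n')
	fmt.Printf("read:   %q\n", line) // full status line, peeked byte included
}
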
@@ -133,22 +140,17 @@ func startEntrypoint(entryPoint *TCPEntryPoint, router *tcp.Router) (net.Conn, error) {

 	entryPoint.SwitchRouter(router)

-	var conn net.Conn
-	var err error
-	var epStarted bool
 	for i := 0; i < 10; i++ {
-		conn, err = net.Dial("tcp", entryPoint.listener.Addr().String())
+		conn, err := net.Dial("tcp", entryPoint.listener.Addr().String())
 		if err != nil {
 			time.Sleep(100 * time.Millisecond)
 			continue
 		}
-		epStarted = true
-		break
-	}
-	if !epStarted {
-		return nil, errors.New("entry point never started")
+
+		return conn, err
 	}
-	return conn, err
+
+	return nil, errors.New("entry point never started")
 }

 func TestReadTimeoutWithoutFirstByte(t *testing.T) {

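The startEntrypoint rewrite is behaviour-preserving but simpler: the connection is now scoped to its loop iteration and returned directly on success, which removes the shared conn, err and epStarted variables while keeping the same budget of 10 attempts at 100ms intervals before giving up with "entry point never started".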