Refactor Exponential Backoff

Daniel Adams 2021-11-10 09:34:10 -05:00 committed by GitHub
parent 0a5c9095ac
commit 83a7f10c75
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23


@@ -16,6 +16,7 @@ import (
     "github.com/traefik/traefik/v2/pkg/config/dynamic"
     "github.com/traefik/traefik/v2/pkg/log"
     "github.com/traefik/traefik/v2/pkg/middlewares"
+    "github.com/traefik/traefik/v2/pkg/safe"
     "github.com/traefik/traefik/v2/pkg/tracing"
 )
@@ -37,11 +38,6 @@ type Listener interface {
 // each of them about a retry attempt.
 type Listeners []Listener
 
-// nexter returns the duration to wait before retrying the operation.
-type nexter interface {
-    NextBackOff() time.Duration
-}
-
 // retry is a middleware that retries requests.
 type retry struct {
     attempts int
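
Note on the deleted nexter interface: it declared only NextBackOff() time.Duration, which the backoff library's own BackOff interface already covers, so newBackOff can return backoff.BackOff directly (see the last hunk below). A minimal sketch of that contract, assuming the backoff import is github.com/cenkalti/backoff/v4 (suggested by the backoff.ZeroBackOff, backoff.WithContext and backoff.RetryNotify calls in this diff); the package name retrydemo is illustrative only:

package retrydemo

import "time"

// BackOff mirrors the contract of backoff.BackOff from
// github.com/cenkalti/backoff/v4 (assumed to be the backoff package used here).
// It is a superset of the removed nexter interface.
type BackOff interface {
    // NextBackOff returns how long to wait before the next retry,
    // or backoff.Stop to signal that no further attempt should be made.
    NextBackOff() time.Duration
    // Reset restores the backoff to its initial state.
    Reset()
}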
@@ -73,21 +69,21 @@ func (r *retry) GetTracingInformation() (string, ext.SpanKindEnum) {
 }
 
 func (r *retry) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
-    // if we might make multiple attempts, swap the body for an io.NopCloser
-    // cf https://github.com/traefik/traefik/issues/1008
-    if r.attempts > 1 {
-        body := req.Body
-        defer body.Close()
-        req.Body = io.NopCloser(body)
+    if r.attempts == 1 {
+        r.next.ServeHTTP(rw, req)
+        return
     }
 
-    attempts := 1
-    backOff := r.newBackOff()
-    currentInterval := 0 * time.Millisecond
-    for {
-        select {
-        case <-time.After(currentInterval):
+    closableBody := req.Body
+    defer closableBody.Close()
+
+    // if we might make multiple attempts, swap the body for an io.NopCloser
+    // cf https://github.com/traefik/traefik/issues/1008
+    req.Body = io.NopCloser(closableBody)
+
+    attempts := 1
+
+    operation := func() error {
         shouldRetry := attempts < r.attempts
         retryResponseWriter := newResponseWriter(rw, shouldRetry)
@@ -105,25 +101,31 @@ func (r *retry) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
         r.next.ServeHTTP(retryResponseWriter, req.WithContext(newCtx))
 
         if !retryResponseWriter.ShouldRetry() {
-            return
+            return nil
         }
 
-        currentInterval = backOff.NextBackOff()
         attempts++
 
+        return fmt.Errorf("attempt %d failed", attempts-1)
+    }
+
+    backOff := backoff.WithContext(r.newBackOff(), req.Context())
+
+    notify := func(err error, d time.Duration) {
         log.FromContext(middlewares.GetLoggerCtx(req.Context(), r.name, typeName)).
             Debugf("New attempt %d for request: %v", attempts, req.URL)
         r.listener.Retried(req, attempts)
-    case <-req.Context().Done():
-        return
     }
+
+    err := backoff.RetryNotify(safe.OperationWithRecover(operation), backOff, notify)
+    if err != nil {
+        log.FromContext(middlewares.GetLoggerCtx(req.Context(), r.name, typeName)).
+            Debugf("Final retry attempt failed: %v", err.Error())
     }
 }
 
-func (r *retry) newBackOff() nexter {
+func (r *retry) newBackOff() backoff.BackOff {
     if r.attempts < 2 || r.initialInterval <= 0 {
         return &backoff.ZeroBackOff{}
     }
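
For context, the new ServeHTTP follows the operation/notify pattern of backoff.RetryNotify. Below is a minimal standalone sketch of that pattern, assuming github.com/cenkalti/backoff/v4 as the backoff package; the attempt counting and log messages are illustrative, not Traefik code (in the middleware the operation is additionally wrapped in safe.OperationWithRecover, as shown above):

package main

import (
    "context"
    "errors"
    "log"
    "time"

    "github.com/cenkalti/backoff/v4"
)

func main() {
    attempts := 1

    // operation is retried until it returns nil or the backoff gives up.
    operation := func() error {
        if attempts < 3 {
            attempts++
            return errors.New("transient failure") // non-nil error => retry
        }
        return nil
    }

    // notify runs before each wait; this is where the middleware logs the
    // new attempt and calls its Retried listeners.
    notify := func(err error, wait time.Duration) {
        log.Printf("attempt failed (%v), retrying in %s", err, wait)
    }

    // Bound the retries by a context, as the middleware does with req.Context().
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    exp := backoff.NewExponentialBackOff()
    exp.InitialInterval = 100 * time.Millisecond

    if err := backoff.RetryNotify(operation, backoff.WithContext(exp, ctx), notify); err != nil {
        log.Printf("final retry attempt failed: %v", err)
    }
}

RetryNotify keeps calling the operation until it returns nil or the context-bound backoff stops, invoking notify before each wait, which is why the manual for/select loop and the local nexter interface are no longer needed.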