chore: update linter.

Author: Ludovic Fernandez, 2021-03-04 09:02:03 +01:00 (committed by GitHub)
Parent: ec0d03658d
Commit: 2e7833df49
21 changed files with 179 additions and 140 deletions


@@ -30,38 +30,64 @@
   lines = 230 # default 60
   statements = 120 # default 40
+[linters-settings.forbidigo]
+  forbid = [
+    '^print(ln)?$',
+    '^spew\.Print(f|ln)?$',
+    '^spew\.Dump$',
+  ]
+[linters-settings.depguard]
+  list-type = "blacklist"
+  include-go-root = false
+  packages = ["github.com/pkg/errors"]
+[linters-settings.godox]
+  keywords = ["FIXME"]
+[linters-settings.importas]
+  corev1 = "k8s.io/api/core/v1"
+  networkingv1beta1 = "k8s.io/api/networking/v1beta1"
+  extensionsv1beta1 = "k8s.io/api/extensions/v1beta1"
+  metav1 = "k8s.io/apimachinery/pkg/apis/meta/v1"
+  kubeerror = "k8s.io/apimachinery/pkg/api/errors"
 [linters]
   enable-all = true
   disable = [
+    "scopelint", # Deprecated
+    "interfacer", # Deprecated
+    "maligned", # Deprecated
+    "sqlclosecheck", # Not relevant (SQL)
+    "rowserrcheck", # Not relevant (SQL)
+    "lll", # Not relevant
     "gocyclo", # FIXME must be fixed
-    "gosec",
-    "dupl",
-    "maligned",
-    "lll",
-    "unparam",
-    "prealloc",
-    "scopelint",
+    "cyclop", # Duplicate of gocyclo
+    "gocognit", # Too strict
+    "nestif", # Too many false-positive.
+    "prealloc", # Too many false-positive.
+    "makezero", # Not relevant
+    "ifshort", # Not relevant
+    "dupl", # Too strict
+    "gosec", # Too strict
     "gochecknoinits",
     "gochecknoglobals",
-    "godox",
-    "gocognit",
-    "bodyclose", # Too many false-positive and panics.
     "wsl", # Too strict
+    "nlreturn", # Not relevant
     "gomnd", # Too strict
     "stylecheck", # skip because report issues related to some generated files.
     "testpackage", # Too strict
-    "goerr113", # Too strict
-    "nestif", # Too many false-positive.
-    "noctx", # Too strict
-    "exhaustive", # Too strict
-    "nlreturn", # Not relevant
-    "wrapcheck", # Too strict
     "tparallel", # Not relevant
     "paralleltest", # Not relevant
+    "exhaustive", # Not relevant
     "exhaustivestruct", # Not relevant
-    "makezero", # not relevant
-    "forbidigo", # not relevant
-    "ifshort", # not relevant
+    "goerr113", # Too strict
+    "wrapcheck", # Too strict
+    "noctx", # Too strict
+    "bodyclose", # Too many false-positive and panics.
+    "unparam", # Too strict
+    "godox", # Too strict
+    "forcetypeassert", # Too strict
   ]
 [issues]
@@ -69,9 +95,9 @@
   max-per-linter = 0
   max-same-issues = 0
   exclude = [
-    "SA1019: http.CloseNotifier is deprecated: the CloseNotifier interface predates Go's context package. New code should use Request.Context instead.", # FIXME must be fixed
     "Error return value of .((os\\.)?std(out|err)\\..*|.*Close|.*Flush|os\\.Remove(All)?|.*printf?|os\\.(Un)?Setenv). is not checked",
     "should have a package comment, unless it's in another file for this package",
+    "SA1019: http.CloseNotifier has been deprecated", # FIXME must be fixed
   ]
 [[issues.exclude-rules]]
   path = "(.+)_test.go"
@@ -88,18 +114,12 @@
 [[issues.exclude-rules]]
   path = "pkg/h2c/h2c.go"
   text = "Error return value of `rw.Write` is not checked"
-[[issues.exclude-rules]]
-  path = "pkg/middlewares/recovery/recovery.go"
-  text = "`logger` can be `github.com/stretchr/testify/assert.TestingT`"
 [[issues.exclude-rules]]
   path = "pkg/provider/docker/builder_test.go"
   text = "(U1000: func )?`(.+)` is unused"
 [[issues.exclude-rules]]
   path = "pkg/provider/kubernetes/builder_(endpoint|service)_test.go"
   text = "(U1000: func )?`(.+)` is unused"
-[[issues.exclude-rules]]
-  path = "pkg/config/parser/.+_test.go"
-  text = "U1000: field `(foo|fuu)` is unused"
 [[issues.exclude-rules]]
   path = "pkg/server/service/bufferpool.go"
   text = "SA6002: argument should be pointer-like to avoid allocations"
@@ -109,9 +129,6 @@
 [[issues.exclude-rules]]
   path = "pkg/server/middleware/middlewares.go"
   text = "Function 'buildConstructor' has too many statements"
-[[issues.exclude-rules]] # FIXME must be fixed
-  path = "cmd/context.go"
-  text = "S1000: should use a simple channel send/receive instead of `select` with a single case"
 [[issues.exclude-rules]]
   path = "pkg/tracing/haystack/logger.go"
   linters = ["goprintffuncname"]
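
The new [linters-settings] blocks above enable three extra checks: forbidigo forbids direct print/println and spew calls, depguard blacklists github.com/pkg/errors, and importas pins the aliases used for the Kubernetes packages. As a rough illustration (hand-written for this note, not code from the commit), a helper like the following passes the configured forbidigo patterns, while the commented-out calls would be reported:

    package example

    import "fmt"

    // describe formats a value for debug output without tripping forbidigo.
    func describe(v interface{}) string {
        // println(v)   // would match the forbidden pattern '^print(ln)?$'
        // spew.Dump(v) // would match the forbidden pattern '^spew\.Dump$'
        return fmt.Sprintf("%#v", v)
    }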


@@ -63,18 +63,18 @@ generate-webui: build-webui-image
 		mkdir -p static; \
 		docker run --rm -v "$$PWD/static":'/src/static' traefik-webui npm run build:nc; \
 		docker run --rm -v "$$PWD/static":'/src/static' traefik-webui chown -R $(shell id -u):$(shell id -g) ../static; \
-		echo 'For more informations show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
+		echo 'For more information show `webui/readme.md`' > $$PWD/static/DONT-EDIT-FILES-IN-THIS-DIRECTORY.md; \
 	fi
 ## Build the linux binary
 binary: generate-webui $(PRE_TARGET)
 	$(if $(PRE_TARGET),$(DOCKER_RUN_TRAEFIK)) ./script/make.sh generate binary
-## Build the binary for the standard plaforms (linux, darwin, windows)
+## Build the binary for the standard platforms (linux, darwin, windows)
 crossbinary-default: generate-webui build-dev-image
 	$(DOCKER_RUN_TRAEFIK_NOTTY) ./script/make.sh generate crossbinary-default
-## Build the binary for the standard plaforms (linux, darwin, windows) in parallel
+## Build the binary for the standard platforms (linux, darwin, windows) in parallel
 crossbinary-default-parallel:
 	$(MAKE) generate-webui
 	$(MAKE) build-dev-image crossbinary-default


@@ -19,7 +19,7 @@ RUN mkdir -p /usr/local/bin \
     && chmod +x /usr/local/bin/go-bindata
 # Download golangci-lint binary to bin folder in $GOPATH
-RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.36.0
+RUN curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.38.0
 # Download misspell binary to bin folder in $GOPATH
 RUN curl -sfL https://raw.githubusercontent.com/client9/misspell/master/install-misspell.sh | bash -s -- -b $GOPATH/bin v0.3.4


@@ -13,10 +13,8 @@ func ContextWithSignal(ctx context.Context) context.Context {
 	signals := make(chan os.Signal)
 	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
 	go func() {
-		select {
-		case <-signals:
-			cancel()
-		}
+		<-signals
+		cancel()
 	}()
 	return newCtx
 }
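
The single-case select is replaced by a plain channel receive, which is what the S1000 simplification ("should use a simple channel send/receive instead of `select` with a single case") asks for; the matching exclude rule for cmd/context.go is dropped from the linter config above. A minimal standalone sketch of the same pattern (the main function and channel buffer size are illustrative, not taken from the repository):

    package main

    import (
        "context"
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())

        signals := make(chan os.Signal, 1)
        signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)
        go func() {
            // A select with a single case is equivalent to a plain receive.
            <-signals
            cancel()
        }()

        <-ctx.Done()
        fmt.Println("shutting down")
    }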


@@ -366,30 +366,32 @@ func initACMEProvider(c *static.Configuration, providerAggregator *aggregator.Pr
 	var resolvers []*acme.Provider
 	for name, resolver := range c.CertificatesResolvers {
-		if resolver.ACME != nil {
-			if localStores[resolver.ACME.Storage] == nil {
-				localStores[resolver.ACME.Storage] = acme.NewLocalStore(resolver.ACME.Storage)
-			}
-			p := &acme.Provider{
-				Configuration:         resolver.ACME,
-				Store:                 localStores[resolver.ACME.Storage],
-				ResolverName:          name,
-				HTTPChallengeProvider: httpChallengeProvider,
-				TLSChallengeProvider:  tlsChallengeProvider,
-			}
-			if err := providerAggregator.AddProvider(p); err != nil {
-				log.WithoutContext().Errorf("The ACME resolver %q is skipped from the resolvers list because: %v", name, err)
-				continue
-			}
-			p.SetTLSManager(tlsManager)
-			p.SetConfigListenerChan(make(chan dynamic.Configuration))
-			resolvers = append(resolvers, p)
+		if resolver.ACME == nil {
+			continue
 		}
+		if localStores[resolver.ACME.Storage] == nil {
+			localStores[resolver.ACME.Storage] = acme.NewLocalStore(resolver.ACME.Storage)
+		}
+		p := &acme.Provider{
+			Configuration:         resolver.ACME,
+			Store:                 localStores[resolver.ACME.Storage],
+			ResolverName:          name,
+			HTTPChallengeProvider: httpChallengeProvider,
+			TLSChallengeProvider:  tlsChallengeProvider,
+		}
+		if err := providerAggregator.AddProvider(p); err != nil {
+			log.WithoutContext().Errorf("The ACME resolver %q is skipped from the resolvers list because: %v", name, err)
+			continue
+		}
+		p.SetTLSManager(tlsManager)
+		p.SetConfigListenerChan(make(chan dynamic.Configuration))
+		resolvers = append(resolvers, p)
 	}
 	return resolvers


@@ -112,7 +112,7 @@ func callHelloClientGRPC(name string, secure bool) (string, error) {
 	} else {
 		client, closer, err = getHelloClientGRPCh2c()
 	}
-	defer closer()
+	defer func() { _ = closer() }()
 	if err != nil {
 		return "", err
@@ -139,6 +139,7 @@ func callStreamExampleClientGRPC() (helloworld.Greeter_StreamExampleClient, func
 func (s *GRPCSuite) TestGRPC(c *check.C) {
 	lis, err := net.Listen("tcp", ":0")
+	c.Assert(err, check.IsNil)
 	_, port, err := net.SplitHostPort(lis.Addr().String())
 	c.Assert(err, check.IsNil)
@@ -181,6 +182,7 @@ func (s *GRPCSuite) TestGRPC(c *check.C) {
 func (s *GRPCSuite) TestGRPCh2c(c *check.C) {
 	lis, err := net.Listen("tcp", ":0")
+	c.Assert(err, check.IsNil)
 	_, port, err := net.SplitHostPort(lis.Addr().String())
 	c.Assert(err, check.IsNil)
@@ -219,6 +221,7 @@ func (s *GRPCSuite) TestGRPCh2c(c *check.C) {
 func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
 	lis, err := net.Listen("tcp", ":0")
+	c.Assert(err, check.IsNil)
 	_, port, err := net.SplitHostPort(lis.Addr().String())
 	c.Assert(err, check.IsNil)
@@ -261,6 +264,7 @@ func (s *GRPCSuite) TestGRPCh2cTermination(c *check.C) {
 func (s *GRPCSuite) TestGRPCInsecure(c *check.C) {
 	lis, err := net.Listen("tcp", ":0")
+	c.Assert(err, check.IsNil)
 	_, port, err := net.SplitHostPort(lis.Addr().String())
 	c.Assert(err, check.IsNil)
@@ -340,7 +344,7 @@ func (s *GRPCSuite) TestGRPCBuffer(c *check.C) {
 	c.Assert(err, check.IsNil)
 	var client helloworld.Greeter_StreamExampleClient
 	client, closer, err := callStreamExampleClientGRPC()
-	defer closer()
+	defer func() { _ = closer() }()
 	c.Assert(err, check.IsNil)
 	received := make(chan bool)
@@ -400,8 +404,10 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
 	var client helloworld.Greeter_StreamExampleClient
 	client, closer, err := callStreamExampleClientGRPC()
-	defer closer()
-	defer func() { stopStreamExample <- true }()
+	defer func() {
+		_ = closer()
+		stopStreamExample <- true
+	}()
 	c.Assert(err, check.IsNil)
 	received := make(chan bool)
@@ -425,6 +431,7 @@ func (s *GRPCSuite) TestGRPCBufferWithFlushInterval(c *check.C) {
 func (s *GRPCSuite) TestGRPCWithRetry(c *check.C) {
 	lis, err := net.Listen("tcp", ":0")
+	c.Assert(err, check.IsNil)
 	_, port, err := net.SplitHostPort(lis.Addr().String())
 	c.Assert(err, check.IsNil)
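
The closer rewrites above make the deliberately ignored error explicit instead of dropping it silently, the pattern unchecked-error linters such as errcheck expect. A small standalone illustration (the temp-file setup exists only for this example):

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        f, err := os.CreateTemp("", "closer-demo-")
        if err != nil {
            fmt.Println(err)
            return
        }

        // A bare `defer f.Close()` discards the returned error; wrapping the call
        // and assigning to the blank identifier records that it is ignored on purpose.
        defer func() { _ = f.Close() }()

        _, _ = fmt.Fprintln(f, "hello")
    }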


@@ -203,10 +203,10 @@ func (s *RestSuite) TestSimpleConfiguration(c *check.C) {
 	}
 	for _, test := range testCase {
-		json, err := json.Marshal(test.config)
+		data, err := json.Marshal(test.config)
 		c.Assert(err, checker.IsNil)
-		request, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8000/secure/api/providers/rest", bytes.NewReader(json))
+		request, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:8000/secure/api/providers/rest", bytes.NewReader(data))
 		c.Assert(err, checker.IsNil)
 		response, err := http.DefaultClient.Do(request)


@@ -120,7 +120,7 @@ func Do(timeout time.Duration, operation DoCondition) error {
 			fmt.Print("*")
 			if err = operation(); err == nil {
 				fmt.Println("+")
-				return err
+				return nil
 			}
 		}
 	}


@@ -461,8 +461,7 @@ func (s *WebsocketSuite) TestSSLhttp2(c *check.C) {
 	}))
 	ts.TLS = &tls.Config{}
-	ts.TLS.NextProtos = append(ts.TLS.NextProtos, `h2`)
-	ts.TLS.NextProtos = append(ts.TLS.NextProtos, `http/1.1`)
+	ts.TLS.NextProtos = append(ts.TLS.NextProtos, `h2`, `http/1.1`)
 	ts.StartTLS()
 	file := s.adaptFile(c, "fixtures/websocket/config_https.toml", struct {


@@ -40,7 +40,7 @@ func Do(baseConfig interface{}, indent bool) (string, error) {
 }
 func doOnJSON(input string) string {
-	mailExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
+	mailExp := regexp.MustCompile(`\w[-.\w]*\w@\w[-.\w]*\w\.\w{2,3}"`)
 	return xurls.Relaxed().ReplaceAllString(mailExp.ReplaceAllString(input, maskLarge+"\""), maskLarge)
 }
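
The underscore is dropped from the character class because `\w` already matches it, so `[-.\w]` accepts exactly the same characters as `[-._\w]`. A quick standalone check of the updated pattern (the sample input and the "xxxx" mask stand in for the real maskLarge constant):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Same pattern as the updated doOnJSON; the trailing quote anchors the
        // match to the end of a JSON string value.
        mailExp := regexp.MustCompile(`\w[-.\w]*\w@\w[-.\w]*\w\.\w{2,3}"`)

        input := `{"email":"john.doe@example.com"}`
        fmt.Println(mailExp.ReplaceAllString(input, `xxxx"`))
        // Output: {"email":"xxxx"}
    }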


@@ -79,7 +79,7 @@ func loadConfigFiles(configFile string, element interface{}) (string, error) {
 		return "", nil
 	}
-	if err = file.Decode(filePath, element); err != nil {
+	if err := file.Decode(filePath, element); err != nil {
 		return "", err
 	}
 	return filePath, nil


@@ -24,9 +24,21 @@ func TestJobBackOff(t *testing.T) {
 	exp.MinJobInterval = testMinJobInterval
 	exp.Reset()
-	expectedResults := []time.Duration{500, 500, 500, 1000, 2000, 4000, 5000, 5000, 500, 1000, 2000, 4000, 5000, 5000}
-	for i, d := range expectedResults {
-		expectedResults[i] = d * time.Millisecond
+	expectedResults := []time.Duration{
+		500 * time.Millisecond,
+		500 * time.Millisecond,
+		500 * time.Millisecond,
+		1 * time.Second,
+		2 * time.Second,
+		4 * time.Second,
+		5 * time.Second,
+		5 * time.Second,
+		500 * time.Millisecond,
+		1 * time.Second,
+		2 * time.Second,
+		4 * time.Second,
+		5 * time.Second,
+		5 * time.Second,
 	}
 	for i, expected := range expectedResults {


@@ -390,7 +390,7 @@ func newCollector(metricName string, labels stdprometheus.Labels, c stdprometheu
 // collector wraps a Collector object from the Prometheus client library.
 // It adds information on how many generations this metric should be present
-// in the /metrics output, relatived to the time it was last tracked.
+// in the /metrics output, relative to the time it was last tracked.
 type collector struct {
 	id     string
 	labels stdprometheus.Labels


@@ -711,10 +711,10 @@ func assertValidLogData(t *testing.T, expected string, logData []byte) {
 	assert.Equal(t, resultExpected[OriginContentSize], result[OriginContentSize], formatErrMessage)
 	assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage)
 	assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage)
-	assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage)
+	assert.Regexp(t, regexp.MustCompile(`\d*`), result[RequestCount], formatErrMessage)
 	assert.Equal(t, resultExpected[RouterName], result[RouterName], formatErrMessage)
 	assert.Equal(t, resultExpected[ServiceURL], result[ServiceURL], formatErrMessage)
-	assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage)
+	assert.Regexp(t, regexp.MustCompile(`\d*ms`), result[Duration], formatErrMessage)
 }
 func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) {


@@ -92,39 +92,42 @@ func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 	// check the recorder code against the configured http status code ranges
 	code := catcher.getCode()
 	for _, block := range c.httpCodeRanges {
-		if code >= block[0] && code <= block[1] {
-			logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
-			var query string
-			if len(c.backendQuery) > 0 {
-				query = "/" + strings.TrimPrefix(c.backendQuery, "/")
-				query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
-			}
-			pageReq, err := newRequest(backendURL + query)
-			if err != nil {
-				logger.Error(err)
-				rw.WriteHeader(code)
-				_, err = fmt.Fprint(rw, http.StatusText(code))
-				if err != nil {
-					http.Error(rw, err.Error(), http.StatusInternalServerError)
-				}
-				return
-			}
-			recorderErrorPage := newResponseRecorder(ctx, rw)
-			utils.CopyHeaders(pageReq.Header, req.Header)
-			c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
-			utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
-			rw.WriteHeader(code)
-			if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
-				logger.Error(err)
-			}
-			return
-		}
+		if code < block[0] || code > block[1] {
+			continue
+		}
+		logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
+		var query string
+		if len(c.backendQuery) > 0 {
+			query = "/" + strings.TrimPrefix(c.backendQuery, "/")
+			query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
+		}
+		pageReq, err := newRequest(backendURL + query)
+		if err != nil {
+			logger.Error(err)
+			rw.WriteHeader(code)
+			_, err = fmt.Fprint(rw, http.StatusText(code))
+			if err != nil {
+				http.Error(rw, err.Error(), http.StatusInternalServerError)
+			}
+			return
+		}
+		recorderErrorPage := newResponseRecorder(ctx, rw)
+		utils.CopyHeaders(pageReq.Header, req.Header)
+		c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
+		utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
+		rw.WriteHeader(code)
+		if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
+			logger.Error(err)
+		}
+		return
 	}
 }


@@ -27,12 +27,12 @@ func (n MockTracer) Extract(format, carrier interface{}) (opentracing.SpanContex
 	return nil, opentracing.ErrSpanContextNotFound
 }
-// MockSpanContext.
+// MockSpanContext a span context mock.
 type MockSpanContext struct{}
 func (n MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
-// MockSpan.
+// MockSpan a span mock.
 type MockSpan struct {
 	OpName string
 	Tags   map[string]interface{}


@@ -541,23 +541,24 @@ func (p *Provider) makeGatewayStatus(listenerStatuses []v1alpha1.ListenerStatus)
 	gatewayStatus.Listeners = listenerStatuses
-	// update "Scheduled" status with "ResourcesAvailable" reason
-	gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
-		Type:               string(v1alpha1.GatewayConditionScheduled),
-		Status:             metav1.ConditionTrue,
-		Reason:             "ResourcesAvailable",
-		Message:            "Resources available",
-		LastTransitionTime: metav1.Now(),
-	})
-	// update "Ready" status with "ListenersValid" reason
-	gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
-		Type:               string(v1alpha1.GatewayConditionReady),
-		Status:             metav1.ConditionTrue,
-		Reason:             "ListenersValid",
-		Message:            "Listeners valid",
-		LastTransitionTime: metav1.Now(),
-	})
+	gatewayStatus.Conditions = append(gatewayStatus.Conditions,
+		// update "Scheduled" status with "ResourcesAvailable" reason
+		metav1.Condition{
+			Type:               string(v1alpha1.GatewayConditionScheduled),
+			Status:             metav1.ConditionTrue,
+			Reason:             "ResourcesAvailable",
+			Message:            "Resources available",
+			LastTransitionTime: metav1.Now(),
+		},
+		// update "Ready" status with "ListenersValid" reason
+		metav1.Condition{
+			Type:               string(v1alpha1.GatewayConditionReady),
+			Status:             metav1.ConditionTrue,
+			Reason:             "ListenersValid",
+			Message:            "Listeners valid",
+			LastTransitionTime: metav1.Now(),
+		},
+	)
 	return gatewayStatus, nil
 }


@@ -341,7 +341,7 @@ func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, i
 }
 // isLoadBalancerIngressEquals returns true if the given slices are equal, false otherwise.
-func isLoadBalancerIngressEquals(aSlice []corev1.LoadBalancerIngress, bSlice []corev1.LoadBalancerIngress) bool {
+func isLoadBalancerIngressEquals(aSlice, bSlice []corev1.LoadBalancerIngress) bool {
 	if len(aSlice) != len(bSlice) {
 		return false
 	}


@@ -341,11 +341,11 @@ func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
 		return nil, err
 	}
-	if err = tc.SetKeepAlive(true); err != nil {
+	if err := tc.SetKeepAlive(true); err != nil {
 		return nil, err
 	}
-	if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
+	if err := tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
 		// Some systems, such as OpenBSD, have no user-settable per-socket TCP
 		// keepalive options.
 		if !errors.Is(err, syscall.ENOPROTOOPT) {


@@ -147,8 +147,8 @@ func (b blackHoleResponseWriter) Header() http.Header {
 	return http.Header{}
 }
-func (b blackHoleResponseWriter) Write(bytes []byte) (int, error) {
-	return len(bytes), nil
+func (b blackHoleResponseWriter) Write(data []byte) (int, error) {
+	return len(data), nil
 }
 func (b blackHoleResponseWriter) WriteHeader(statusCode int) {}


@@ -24,6 +24,19 @@ type stickyCookie struct {
 	httpOnly bool
 }
+// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
+// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
+// Each pick from the schedule has the earliest deadline entry selected.
+// Entries have deadlines set at currentDeadline + 1 / weight,
+// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
+type Balancer struct {
+	stickyCookie *stickyCookie
+	mutex        sync.RWMutex
+	handlers     []*namedHandler
+	curDeadline  float64
+}
 // New creates a new load balancer.
 func New(sticky *dynamic.Sticky) *Balancer {
 	balancer := &Balancer{}
@@ -68,19 +81,6 @@ func (b *Balancer) Pop() interface{} {
 	return h
 }
-// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
-// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
-// Each pick from the schedule has the earliest deadline entry selected.
-// Entries have deadlines set at currentDeadline + 1 / weight,
-// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
-type Balancer struct {
-	stickyCookie *stickyCookie
-	mutex        sync.RWMutex
-	handlers     []*namedHandler
-	curDeadline  float64
-}
 func (b *Balancer) nextServer() (*namedHandler, error) {
 	b.mutex.Lock()
 	defer b.mutex.Unlock()
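
The comment moved above New describes the earliest-deadline-first scheme behind the weighted round-robin balancer. A standalone sketch of the deadline arithmetic it describes, using a linear scan instead of the heap the real Balancer relies on (server names and weights are made up for the example):

    package main

    import "fmt"

    // server is a stripped-down stand-in for namedHandler: a name, a weight,
    // and the deadline used for earliest-deadline-first selection.
    type server struct {
        name     string
        weight   float64
        deadline float64
    }

    func main() {
        servers := []*server{
            {name: "a", weight: 3},
            {name: "b", weight: 1},
        }

        for i := 0; i < 8; i++ {
            // Pick the entry with the earliest deadline (the real balancer keeps a
            // heap for an O(log n) pick; a linear scan keeps the sketch short).
            next := servers[0]
            for _, s := range servers[1:] {
                if s.deadline < next.deadline {
                    next = s
                }
            }

            // The picked entry's deadline becomes currentDeadline + 1/weight, so
            // heavier weights are picked proportionally more often.
            next.deadline += 1 / next.weight

            fmt.Print(next.name, " ")
        }
        fmt.Println()
        // Prints: a b a a a b a a  -- "a" (weight 3) is picked three times per "b".
    }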