diff --git a/Gopkg.lock b/Gopkg.lock index ce5b3d923..7644eb2d5 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -255,8 +255,8 @@ [[projects]] name = "github.com/containous/staert" packages = ["."] - revision = "cc00c303ccbd2491ddc1dccc9eb7ccadd807557e" - version = "v3.1.0" + revision = "66717a0e0ca950c4b6dc8c87b46da0b8495c6e41" + version = "v3.1.1" [[projects]] name = "github.com/containous/traefik-extra-service-fabric" @@ -1226,7 +1226,7 @@ "roundrobin", "utils" ] - revision = "d5b73186eed4aa34b52748699ad19e90f61d4059" + revision = "c2414f4542f085363f490048da2fbec5e4537eb6" [[projects]] name = "github.com/vulcand/predicate" @@ -1711,6 +1711,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "63fb25f0e549ec7942fda4d11c25e04bdf756dcc44d31e897b103f2270dc42d9" + inputs-digest = "593c88b41d6384d68bd610a8c80c39017e77584f4e3454b2ca5c26ee904bf1da" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index af0304ae4..235a38871 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -62,7 +62,7 @@ [[constraint]] name = "github.com/containous/staert" - version = "3.1.0" + version = "3.1.1" [[constraint]] name = "github.com/containous/traefik-extra-service-fabric" diff --git a/docs/configuration/acme.md b/docs/configuration/acme.md index 5ba423b65..44e7cf465 100644 --- a/docs/configuration/acme.md +++ b/docs/configuration/acme.md @@ -206,7 +206,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification, | [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES | | [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet | | [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet | -| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | Not tested yet | +| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES | | [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet | | [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet | | [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES | diff --git a/docs/user-guide/swarm-mode.md b/docs/user-guide/swarm-mode.md index 0c36d269b..3d713127c 100644 --- a/docs/user-guide/swarm-mode.md +++ b/docs/user-guide/swarm-mode.md @@ -102,7 +102,7 @@ Let's explain this command: | `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. | | `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. | | `--docker` | enable docker provider, and `--docker.swarmMode` to enable the swarm mode on Træfik. 
| -| `--api | activate the webUI on port 8080 | +| `--api` | activate the webUI on port 8080 | ## Deploy your apps diff --git a/middlewares/pipelining/pipelining.go b/middlewares/pipelining/pipelining.go new file mode 100644 index 000000000..ce06d79c9 --- /dev/null +++ b/middlewares/pipelining/pipelining.go @@ -0,0 +1,62 @@ +package pipelining + +import ( + "bufio" + "net" + "net/http" +) + +// Pipelining returns a middleware +type Pipelining struct { + next http.Handler +} + +// NewPipelining returns a new Pipelining instance +func NewPipelining(next http.Handler) *Pipelining { + return &Pipelining{ + next: next, + } +} + +func (p *Pipelining) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + // https://github.com/golang/go/blob/3d59583836630cf13ec4bfbed977d27b1b7adbdc/src/net/http/server.go#L201-L218 + if r.Method == http.MethodPut || r.Method == http.MethodPost { + p.next.ServeHTTP(rw, r) + } else { + p.next.ServeHTTP(&writerWithoutCloseNotify{rw}, r) + } + +} + +// writerWithoutCloseNotify helps to disable closeNotify +type writerWithoutCloseNotify struct { + W http.ResponseWriter +} + +// Header returns the response headers. +func (w *writerWithoutCloseNotify) Header() http.Header { + return w.W.Header() +} + +// Write writes the data to the connection as part of an HTTP reply. +func (w *writerWithoutCloseNotify) Write(buf []byte) (int, error) { + return w.W.Write(buf) +} + +// WriteHeader sends an HTTP response header with the provided +// status code. +func (w *writerWithoutCloseNotify) WriteHeader(code int) { + w.W.WriteHeader(code) +} + +// Flush sends any buffered data to the client. +func (w *writerWithoutCloseNotify) Flush() { + if f, ok := w.W.(http.Flusher); ok { + f.Flush() + } +} + +// Hijack hijacks the connection. +func (w *writerWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return w.W.(http.Hijacker).Hijack() +} diff --git a/middlewares/pipelining/pipelining_test.go b/middlewares/pipelining/pipelining_test.go new file mode 100644 index 000000000..b5b327a41 --- /dev/null +++ b/middlewares/pipelining/pipelining_test.go @@ -0,0 +1,69 @@ +package pipelining + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +type recorderWithCloseNotify struct { + *httptest.ResponseRecorder +} + +func (r *recorderWithCloseNotify) CloseNotify() <-chan bool { + panic("implement me") +} + +func TestNewPipelining(t *testing.T) { + testCases := []struct { + desc string + HTTPMethod string + implementCloseNotifier bool + }{ + { + desc: "should not implement CloseNotifier with GET method", + HTTPMethod: http.MethodGet, + implementCloseNotifier: false, + }, + { + desc: "should implement CloseNotifier with PUT method", + HTTPMethod: http.MethodPut, + implementCloseNotifier: true, + }, + { + desc: "should implement CloseNotifier with POST method", + HTTPMethod: http.MethodPost, + implementCloseNotifier: true, + }, + { + desc: "should not implement CloseNotifier with GET method", + HTTPMethod: http.MethodHead, + implementCloseNotifier: false, + }, + { + desc: "should not implement CloseNotifier with PROPFIND method", + HTTPMethod: "PROPFIND", + implementCloseNotifier: false, + }, + } + + for _, test := range testCases { + test := test + t.Run(test.desc, func(t *testing.T) { + t.Parallel() + + nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + _, ok := w.(http.CloseNotifier) + assert.Equal(t, test.implementCloseNotifier, ok) + w.WriteHeader(http.StatusOK) + }) + handler := 
NewPipelining(nextHandler) + + req := httptest.NewRequest(test.HTTPMethod, "http://localhost", nil) + + handler.ServeHTTP(&recorderWithCloseNotify{httptest.NewRecorder()}, req) + }) + } +} diff --git a/provider/docker/config.go b/provider/docker/config.go index d8e6e99aa..75331c97c 100644 --- a/provider/docker/config.go +++ b/provider/docker/config.go @@ -2,6 +2,8 @@ package docker import ( "context" + "crypto/md5" + "encoding/hex" "fmt" "net" "strconv" @@ -107,13 +109,11 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types } func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string { - serviceNameKey := container.ServiceName - - if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); !swarmMode && err == nil { - serviceNameKey = values[labelDockerComposeService] + values[labelDockerComposeProject] + if swarmMode { + return container.ServiceName + segmentName } - return serviceNameKey + segmentName + return getServiceName(container) + segmentName } func (p *Provider) containerFilter(container dockerData) bool { @@ -170,7 +170,7 @@ func checkSegmentPort(labels map[string]string, segmentName string) error { func (p *Provider) getFrontendName(container dockerData, idx int) string { var name string if len(container.SegmentName) > 0 { - name = getBackendName(container) + name = container.SegmentName + "-" + getBackendName(container) } else { name = p.getFrontendRule(container, container.SegmentLabels) + "-" + strconv.Itoa(idx) } @@ -262,17 +262,21 @@ func isBackendLBSwarm(container dockerData) bool { return label.GetBoolValue(container.Labels, labelBackendLoadBalancerSwarm, false) } -func getSegmentBackendName(container dockerData) string { - serviceName := container.ServiceName - if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil { - serviceName = provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]) +func getBackendName(container dockerData) string { + if len(container.SegmentName) > 0 { + return getSegmentBackendName(container) } + return getDefaultBackendName(container) +} + +func getSegmentBackendName(container dockerData) string { + serviceName := getServiceName(container) if value := label.GetStringValue(container.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 { return provider.Normalize(serviceName + "-" + value) } - return provider.Normalize(serviceName + "-" + getDefaultBackendName(container) + "-" + container.SegmentName) + return provider.Normalize(serviceName + "-" + container.SegmentName) } func getDefaultBackendName(container dockerData) string { @@ -280,19 +284,17 @@ func getDefaultBackendName(container dockerData) string { return provider.Normalize(value) } - if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil { - return provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]) - } - - return provider.Normalize(container.ServiceName) + return provider.Normalize(getServiceName(container)) } -func getBackendName(container dockerData) string { - if len(container.SegmentName) > 0 { - return getSegmentBackendName(container) +func getServiceName(container dockerData) string { + serviceName := container.ServiceName + + if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, 
labelDockerComposeService); err == nil { + serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject] } - return getDefaultBackendName(container) + return serviceName } func getPort(container dockerData) string { @@ -322,7 +324,7 @@ func getPort(container dockerData) string { func (p *Provider) getServers(containers []dockerData) map[string]types.Server { var servers map[string]types.Server - for i, container := range containers { + for _, container := range containers { ip := p.getIPAddress(container) if len(ip) == 0 { log.Warnf("Unable to find the IP address for the container %q: the server is ignored.", container.Name) @@ -336,16 +338,30 @@ func (p *Provider) getServers(containers []dockerData) map[string]types.Server { protocol := label.GetStringValue(container.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol) port := getPort(container) - serverName := "server-" + container.SegmentName + "-" + container.Name - if len(container.SegmentName) > 0 { - serverName += "-" + strconv.Itoa(i) + serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)) + + serverName := getServerName(container.Name, serverURL) + if _, exist := servers[serverName]; exist { + log.Debugf("Skipping server %q with the same URL.", serverName) + continue } - servers[provider.Normalize(serverName)] = types.Server{ - URL: fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)), + servers[serverName] = types.Server{ + URL: serverURL, Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeight), } } return servers } + +func getServerName(containerName, url string) string { + hash := md5.New() + _, err := hash.Write([]byte(url)) + if err != nil { + // Impossible case + log.Errorf("Fail to hash server URL %q", url) + } + + return provider.Normalize("server-" + containerName + "-" + hex.EncodeToString(hash.Sum(nil))) +} diff --git a/provider/docker/config_container_docker_test.go b/provider/docker/config_container_docker_test.go index d9d1201f8..50cc52676 100644 --- a/provider/docker/config_container_docker_test.go +++ b/provider/docker/config_container_docker_test.go @@ -55,7 +55,7 @@ func TestDockerBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-test": { Servers: map[string]types.Server{ - "server-test": { + "server-test-842895ca2aca17f6ee36ddb2f621194d": { URL: "http://127.0.0.1:80", Weight: label.DefaultWeight, }, @@ -91,7 +91,7 @@ func TestDockerBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-test": { Servers: map[string]types.Server{ - "server-test": { + "server-test-48093b9fc43454203aacd2bc4057a08c": { URL: "http://127.0.0.2:80", Weight: label.DefaultWeight, }, @@ -132,7 +132,7 @@ func TestDockerBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-test": { Servers: map[string]types.Server{ - "server-test": { + "server-test-405767e9733427148cd8dae6c4d331b0": { URL: "http://127.0.0.3:80", Weight: label.DefaultWeight, }, @@ -352,7 +352,7 @@ func TestDockerBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-foobar": { Servers: map[string]types.Server{ - "server-test1": { + "server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": { URL: "https://127.0.0.1:666", Weight: 12, }, @@ -460,10 +460,11 @@ func TestDockerBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-myService-myProject": { Servers: map[string]types.Server{ - "server-test-0": { + 
"server-test-0-842895ca2aca17f6ee36ddb2f621194d": { URL: "http://127.0.0.1:80", Weight: label.DefaultWeight, - }, "server-test-1": { + }, + "server-test-1-48093b9fc43454203aacd2bc4057a08c": { URL: "http://127.0.0.2:80", Weight: label.DefaultWeight, }, @@ -472,7 +473,7 @@ func TestDockerBuildConfiguration(t *testing.T) { }, "backend-myService2-myProject": { Servers: map[string]types.Server{ - "server-test-2": { + "server-test-2-405767e9733427148cd8dae6c4d331b0": { URL: "http://127.0.0.3:80", Weight: label.DefaultWeight, }, @@ -1164,7 +1165,7 @@ func TestDockerGetServers(t *testing.T) { })), }, expected: map[string]types.Server{ - "server-test1": { + "server-test1-fb00f762970935200c76ccdaf91458f6": { URL: "http://10.10.10.10:80", Weight: 1, }, @@ -1193,15 +1194,15 @@ func TestDockerGetServers(t *testing.T) { })), }, expected: map[string]types.Server{ - "server-test1": { + "server-test1-743440b6f4a8ffd8737626215f2c5a33": { URL: "http://10.10.10.11:80", Weight: 1, }, - "server-test2": { + "server-test2-547f74bbb5da02b6c8141ce9aa96c13b": { URL: "http://10.10.10.12:81", Weight: 1, }, - "server-test3": { + "server-test3-c57fd8b848c814a3f2a4a4c12e13c179": { URL: "http://10.10.10.13:82", Weight: 1, }, @@ -1230,11 +1231,11 @@ func TestDockerGetServers(t *testing.T) { })), }, expected: map[string]types.Server{ - "server-test2": { + "server-test2-547f74bbb5da02b6c8141ce9aa96c13b": { URL: "http://10.10.10.12:81", Weight: 1, }, - "server-test3": { + "server-test3-c57fd8b848c814a3f2a4a4c12e13c179": { URL: "http://10.10.10.13:82", Weight: 1, }, diff --git a/provider/docker/config_container_swarm_test.go b/provider/docker/config_container_swarm_test.go index f70a77da2..ad982ecfd 100644 --- a/provider/docker/config_container_swarm_test.go +++ b/provider/docker/config_container_swarm_test.go @@ -57,7 +57,7 @@ func TestSwarmBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-test": { Servers: map[string]types.Server{ - "server-test": { + "server-test-842895ca2aca17f6ee36ddb2f621194d": { URL: "http://127.0.0.1:80", Weight: label.DefaultWeight, }, @@ -243,7 +243,6 @@ func TestSwarmBuildConfiguration(t *testing.T) { ReferrerPolicy: "foo", IsDevelopment: true, }, - Errors: map[string]*types.ErrorPage{ "foo": { Status: []string{"404"}, @@ -281,7 +280,7 @@ func TestSwarmBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-foobar": { Servers: map[string]types.Server{ - "server-test1": { + "server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": { URL: "https://127.0.0.1:666", Weight: 12, }, diff --git a/provider/docker/config_segment_test.go b/provider/docker/config_segment_test.go index 23350c4a6..c9c00e71c 100644 --- a/provider/docker/config_segment_test.go +++ b/provider/docker/config_segment_test.go @@ -42,22 +42,22 @@ func TestSegmentBuildConfiguration(t *testing.T) { ), }, expectedFrontends: map[string]*types.Frontend{ - "frontend-foo-foo-sauternes": { - Backend: "backend-foo-foo-sauternes", + "frontend-sauternes-foo-sauternes": { + Backend: "backend-foo-sauternes", PassHostHeader: true, EntryPoints: []string{"http", "https"}, BasicAuth: []string{}, Routes: map[string]types.Route{ - "route-frontend-foo-foo-sauternes": { + "route-frontend-sauternes-foo-sauternes": { Rule: "Host:foo.docker.localhost", }, }, }, }, expectedBackends: map[string]*types.Backend{ - "backend-foo-foo-sauternes": { + "backend-foo-sauternes": { Servers: map[string]types.Server{ - "server-sauternes-foo-0": { + "server-foo-863563a2e23c95502862016417ee95ea": { URL: 
"http://127.0.0.1:2503", Weight: label.DefaultWeight, }, @@ -133,8 +133,8 @@ func TestSegmentBuildConfiguration(t *testing.T) { ), }, expectedFrontends: map[string]*types.Frontend{ - "frontend-foo-foo-sauternes": { - Backend: "backend-foo-foo-sauternes", + "frontend-sauternes-foo-sauternes": { + Backend: "backend-foo-sauternes", EntryPoints: []string{ "http", "https", @@ -226,16 +226,16 @@ func TestSegmentBuildConfiguration(t *testing.T) { }, Routes: map[string]types.Route{ - "route-frontend-foo-foo-sauternes": { + "route-frontend-sauternes-foo-sauternes": { Rule: "Host:foo.docker.localhost", }, }, }, }, expectedBackends: map[string]*types.Backend{ - "backend-foo-foo-sauternes": { + "backend-foo-sauternes": { Servers: map[string]types.Server{ - "server-sauternes-foo-0": { + "server-foo-7f6444e0dff3330c8b0ad2bbbd383b0f": { URL: "https://127.0.0.1:666", Weight: 12, }, @@ -280,7 +280,7 @@ func TestSegmentBuildConfiguration(t *testing.T) { ), }, expectedFrontends: map[string]*types.Frontend{ - "frontend-test1-foobar": { + "frontend-sauternes-test1-foobar": { Backend: "backend-test1-foobar", PassHostHeader: false, Priority: 5000, @@ -290,18 +290,18 @@ func TestSegmentBuildConfiguration(t *testing.T) { EntryPoint: "https", }, Routes: map[string]types.Route{ - "route-frontend-test1-foobar": { + "route-frontend-sauternes-test1-foobar": { Rule: "Path:/mypath", }, }, }, - "frontend-test2-test2-anothersauternes": { - Backend: "backend-test2-test2-anothersauternes", + "frontend-anothersauternes-test2-anothersauternes": { + Backend: "backend-test2-anothersauternes", PassHostHeader: true, EntryPoints: []string{}, BasicAuth: []string{}, Routes: map[string]types.Route{ - "route-frontend-test2-test2-anothersauternes": { + "route-frontend-anothersauternes-test2-anothersauternes": { Rule: "Path:/anotherpath", }, }, @@ -310,16 +310,16 @@ func TestSegmentBuildConfiguration(t *testing.T) { expectedBackends: map[string]*types.Backend{ "backend-test1-foobar": { Servers: map[string]types.Server{ - "server-sauternes-test1-0": { + "server-test1-79533a101142718f0fdf84c42593c41e": { URL: "https://127.0.0.1:2503", Weight: 80, }, }, CircuitBreaker: nil, }, - "backend-test2-test2-anothersauternes": { + "backend-test2-anothersauternes": { Servers: map[string]types.Server{ - "server-anothersauternes-test2-0": { + "server-test2-e9c1b66f9af919aa46053fbc2391bb4a": { URL: "http://127.0.0.1:8079", Weight: 33, }, @@ -328,6 +328,152 @@ func TestSegmentBuildConfiguration(t *testing.T) { }, }, }, + { + desc: "several segments with the same backend name and same port", + containers: []docker.ContainerJSON{ + containerJSON( + name("test1"), + labels(map[string]string{ + "traefik.port": "2503", + "traefik.protocol": "https", + "traefik.weight": "80", + "traefik.frontend.entryPoints": "http,https", + "traefik.frontend.redirect.entryPoint": "https", + + "traefik.sauternes.backend": "foobar", + "traefik.sauternes.frontend.rule": "Path:/sauternes", + "traefik.sauternes.frontend.priority": "5000", + + "traefik.arbois.backend": "foobar", + "traefik.arbois.frontend.rule": "Path:/arbois", + "traefik.arbois.frontend.priority": "3000", + }), + ports(nat.PortMap{ + "80/tcp": {}, + }), + withNetwork("bridge", ipv4("127.0.0.1")), + ), + }, + expectedFrontends: map[string]*types.Frontend{ + "frontend-sauternes-test1-foobar": { + Backend: "backend-test1-foobar", + PassHostHeader: true, + Priority: 5000, + EntryPoints: []string{"http", "https"}, + BasicAuth: []string{}, + Redirect: &types.Redirect{ + EntryPoint: "https", + }, + Routes: 
map[string]types.Route{ + "route-frontend-sauternes-test1-foobar": { + Rule: "Path:/sauternes", + }, + }, + }, + "frontend-arbois-test1-foobar": { + Backend: "backend-test1-foobar", + PassHostHeader: true, + Priority: 3000, + EntryPoints: []string{"http", "https"}, + BasicAuth: []string{}, + Redirect: &types.Redirect{ + EntryPoint: "https", + }, + Routes: map[string]types.Route{ + "route-frontend-arbois-test1-foobar": { + Rule: "Path:/arbois", + }, + }, + }, + }, + expectedBackends: map[string]*types.Backend{ + "backend-test1-foobar": { + Servers: map[string]types.Server{ + "server-test1-79533a101142718f0fdf84c42593c41e": { + URL: "https://127.0.0.1:2503", + Weight: 80, + }, + }, + CircuitBreaker: nil, + }, + }, + }, + { + desc: "several segments with the same backend name and different port (wrong behavior)", + containers: []docker.ContainerJSON{ + containerJSON( + name("test1"), + labels(map[string]string{ + "traefik.protocol": "https", + "traefik.frontend.entryPoints": "http,https", + "traefik.frontend.redirect.entryPoint": "https", + + "traefik.sauternes.port": "2503", + "traefik.sauternes.weight": "80", + "traefik.sauternes.backend": "foobar", + "traefik.sauternes.frontend.rule": "Path:/sauternes", + "traefik.sauternes.frontend.priority": "5000", + + "traefik.arbois.port": "2504", + "traefik.arbois.weight": "90", + "traefik.arbois.backend": "foobar", + "traefik.arbois.frontend.rule": "Path:/arbois", + "traefik.arbois.frontend.priority": "3000", + }), + ports(nat.PortMap{ + "80/tcp": {}, + }), + withNetwork("bridge", ipv4("127.0.0.1")), + ), + }, + expectedFrontends: map[string]*types.Frontend{ + "frontend-sauternes-test1-foobar": { + Backend: "backend-test1-foobar", + PassHostHeader: true, + Priority: 5000, + EntryPoints: []string{"http", "https"}, + BasicAuth: []string{}, + Redirect: &types.Redirect{ + EntryPoint: "https", + }, + Routes: map[string]types.Route{ + "route-frontend-sauternes-test1-foobar": { + Rule: "Path:/sauternes", + }, + }, + }, + "frontend-arbois-test1-foobar": { + Backend: "backend-test1-foobar", + PassHostHeader: true, + Priority: 3000, + EntryPoints: []string{"http", "https"}, + BasicAuth: []string{}, + Redirect: &types.Redirect{ + EntryPoint: "https", + }, + Routes: map[string]types.Route{ + "route-frontend-arbois-test1-foobar": { + Rule: "Path:/arbois", + }, + }, + }, + }, + expectedBackends: map[string]*types.Backend{ + "backend-test1-foobar": { + Servers: map[string]types.Server{ + "server-test1-79533a101142718f0fdf84c42593c41e": { + URL: "https://127.0.0.1:2503", + Weight: 80, + }, + "server-test1-315a41140f1bd825b066e39686c18482": { + URL: "https://127.0.0.1:2504", + Weight: 90, + }, + }, + CircuitBreaker: nil, + }, + }, + }, } provider := &Provider{ diff --git a/server/server_configuration.go b/server/server_configuration.go index c8146e44a..ec9b904a8 100644 --- a/server/server_configuration.go +++ b/server/server_configuration.go @@ -16,6 +16,7 @@ import ( "github.com/containous/traefik/log" "github.com/containous/traefik/metrics" "github.com/containous/traefik/middlewares" + "github.com/containous/traefik/middlewares/pipelining" "github.com/containous/traefik/rules" "github.com/containous/traefik/safe" traefiktls "github.com/containous/traefik/tls" @@ -254,6 +255,8 @@ func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration }) } + fwd = pipelining.NewPipelining(fwd) + return fwd, nil } diff --git a/vendor/github.com/containous/staert/kv.go b/vendor/github.com/containous/staert/kv.go index bec34d173..60ad3c2d6 100644 --- 
a/vendor/github.com/containous/staert/kv.go +++ b/vendor/github.com/containous/staert/kv.go @@ -46,16 +46,16 @@ func (kv *KvSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) { // LoadConfig loads data from the KV Store into the config structure (given by reference) func (kv *KvSource) LoadConfig(config interface{}) error { - pairs := map[string][]byte{} - if err := kv.ListRecursive(kv.Prefix, pairs); err != nil { + pairs, err := kv.ListValuedPairWithPrefix(kv.Prefix) + if err != nil { return err } - // fmt.Printf("pairs : %#v\n", pairs) + mapStruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix) if err != nil { return err } - // fmt.Printf("mapStruct : %#v\n", mapStruct) + configDecoder := &mapstructure.DecoderConfig{ Metadata: nil, Result: config, @@ -77,11 +77,11 @@ func generateMapstructure(pairs []*store.KVPair, prefix string) (map[string]inte for _, p := range pairs { // Trim the prefix off our key first key := strings.TrimPrefix(strings.Trim(p.Key, "/"), strings.Trim(prefix, "/")+"/") - raw, err := processKV(key, p.Value, raw) + var err error + raw, err = processKV(key, p.Value, raw) if err != nil { return raw, err } - } return raw, nil } @@ -313,15 +313,23 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string func writeCompressedData(data []byte) (string, error) { var buffer bytes.Buffer gzipWriter := gzip.NewWriter(&buffer) + _, err := gzipWriter.Write(data) if err != nil { return "", err } - gzipWriter.Close() + + err = gzipWriter.Close() + if err != nil { + return "", err + } + return buffer.String(), nil } // ListRecursive lists all key value children under key +// Replaced by ListValuedPairWithPrefix +// Deprecated func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error { pairsN1, err := kv.List(key, nil) if err == store.ErrKeyNotFound { @@ -342,14 +350,37 @@ func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error { return nil } for _, p := range pairsN1 { - err := kv.ListRecursive(p.Key, pairs) - if err != nil { - return err + if p.Key != key { + err := kv.ListRecursive(p.Key, pairs) + if err != nil { + return err + } } } return nil } +// ListValuedPairWithPrefix lists all key value children under key +func (kv *KvSource) ListValuedPairWithPrefix(key string) (map[string][]byte, error) { + pairs := make(map[string][]byte) + + pairsN1, err := kv.List(key, nil) + if err == store.ErrKeyNotFound { + return pairs, nil + } + if err != nil { + return pairs, err + } + + for _, p := range pairsN1 { + if len(p.Value) > 0 { + pairs[p.Key] = p.Value + } + } + + return pairs, nil +} + func convertPairs(pairs map[string][]byte) []*store.KVPair { slicePairs := make([]*store.KVPair, len(pairs)) i := 0 diff --git a/vendor/github.com/containous/staert/staert.go b/vendor/github.com/containous/staert/staert.go index cf56d39ee..fa2fa6f14 100644 --- a/vendor/github.com/containous/staert/staert.go +++ b/vendor/github.com/containous/staert/staert.go @@ -2,12 +2,8 @@ package staert import ( "fmt" - "os" - "path/filepath" "reflect" - "strings" - "github.com/BurntSushi/toml" "github.com/containous/flaeg" ) @@ -24,10 +20,7 @@ type Staert struct { // NewStaert creates and return a pointer on Staert. 
Need defaultConfig and defaultPointersConfig given by references func NewStaert(rootCommand *flaeg.Command) *Staert { - s := Staert{ - command: rootCommand, - } - return &s + return &Staert{command: rootCommand} } // AddSource adds new Source to Staert, give it by reference @@ -35,40 +28,31 @@ func (s *Staert) AddSource(src Source) { s.sources = append(s.sources, src) } -// getConfig for a flaeg.Command run sources Parse func in the raw -func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error { - for _, src := range s.sources { - var err error - _, err = src.Parse(cmd) - if err != nil { - return err - } - } - return nil -} - // LoadConfig check which command is called and parses config // It returns the the parsed config or an error if it fails func (s *Staert) LoadConfig() (interface{}, error) { for _, src := range s.sources { - //Type assertion - f, ok := src.(*flaeg.Flaeg) - if ok { - if fCmd, err := f.GetCommand(); err != nil { + // Type assertion + if flg, ok := src.(*flaeg.Flaeg); ok { + fCmd, err := flg.GetCommand() + if err != nil { return nil, err - } else if s.command != fCmd { - //IF fleag sub-command + } + + // if fleag sub-command + if s.command != fCmd { + // if parseAllSources if fCmd.Metadata["parseAllSources"] == "true" { - //IF parseAllSources fCmdConfigType := reflect.TypeOf(fCmd.Config) sCmdConfigType := reflect.TypeOf(s.command.Config) if fCmdConfigType != sCmdConfigType { - return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name()) + return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s", + fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name()) } s.command = fCmd } else { - // ELSE (not parseAllSources) - s.command, err = f.Parse(fCmd) + // (not parseAllSources) + s.command, err = flg.Parse(fCmd) return s.command.Config, err } } @@ -78,117 +62,19 @@ func (s *Staert) LoadConfig() (interface{}, error) { return s.command.Config, err } +// parseConfigAllSources getConfig for a flaeg.Command run sources Parse func in the raw +func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error { + for _, src := range s.sources { + _, err := src.Parse(cmd) + if err != nil { + return err + } + } + return nil +} + // Run calls the Run func of the command // Warning, Run doesn't parse the config func (s *Staert) Run() error { return s.command.Run() } - -//TomlSource impement Source -type TomlSource struct { - filename string - dirNfullpath []string - fullpath string -} - -// NewTomlSource creates and return a pointer on TomlSource. -// Parameter filename is the file name (without extension type, ".toml" will be added) -// dirNfullpath may contain directories or fullpath to the file. 
-func NewTomlSource(filename string, dirNfullpath []string) *TomlSource { - return &TomlSource{filename, dirNfullpath, ""} -} - -// ConfigFileUsed return config file used -func (ts *TomlSource) ConfigFileUsed() string { - return ts.fullpath -} - -func preprocessDir(dirIn string) (string, error) { - dirOut := dirIn - expanded := os.ExpandEnv(dirIn) - dirOut, err := filepath.Abs(expanded) - return dirOut, err -} - -func findFile(filename string, dirNfile []string) string { - for _, df := range dirNfile { - if df != "" { - fullPath, _ := preprocessDir(df) - if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() { - return fullPath - } - - fullPath = filepath.Join(fullPath, filename+".toml") - if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() { - return fullPath - } - } - } - return "" -} - -// Parse calls toml.DecodeFile() func -func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) { - ts.fullpath = findFile(ts.filename, ts.dirNfullpath) - if len(ts.fullpath) < 2 { - return cmd, nil - } - metadata, err := toml.DecodeFile(ts.fullpath, cmd.Config) - if err != nil { - return nil, err - } - boolFlags, err := flaeg.GetBoolFlags(cmd.Config) - if err != nil { - return nil, err - } - flaegArgs, hasUnderField, err := generateArgs(metadata, boolFlags) - if err != nil { - return nil, err - } - - // fmt.Println(flaegArgs) - err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flaegArgs) - //if err!= missing parser err - if err != nil && err != flaeg.ErrParserNotFound { - return nil, err - } - if hasUnderField { - _, err := toml.DecodeFile(ts.fullpath, cmd.Config) - if err != nil { - return nil, err - } - } - - return cmd, nil -} - -func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) { - var flaegArgs []string - keys := metadata.Keys() - hasUnderField := false - for i, key := range keys { - // fmt.Println(key) - if metadata.Type(key.String()) == "Hash" { - // TOML hashes correspond to Go structs or maps. - // fmt.Printf("%s could be a ptr on a struct, or a map\n", key) - for j := i; j < len(keys); j++ { - // fmt.Printf("%s =? %s\n", keys[j].String(), "."+key.String()) - if strings.Contains(keys[j].String(), key.String()+".") { - hasUnderField = true - break - } - } - match := false - for _, flag := range flags { - if flag == strings.ToLower(key.String()) { - match = true - break - } - } - if match { - flaegArgs = append(flaegArgs, "--"+strings.ToLower(key.String())) - } - } - } - return flaegArgs, hasUnderField, nil -} diff --git a/vendor/github.com/containous/staert/toml.go b/vendor/github.com/containous/staert/toml.go new file mode 100644 index 000000000..9f6fb7a23 --- /dev/null +++ b/vendor/github.com/containous/staert/toml.go @@ -0,0 +1,121 @@ +package staert + +import ( + "os" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + "github.com/containous/flaeg" +) + +var _ Source = (*TomlSource)(nil) + +// TomlSource implement staert.Source +type TomlSource struct { + filename string + dirNFullPath []string + fullPath string +} + +// NewTomlSource creates and return a pointer on Source. +// Parameter filename is the file name (without extension type, ".toml" will be added) +// dirNFullPath may contain directories or fullPath to the file. 
+func NewTomlSource(filename string, dirNFullPath []string) *TomlSource { + return &TomlSource{filename, dirNFullPath, ""} +} + +// ConfigFileUsed return config file used +func (ts *TomlSource) ConfigFileUsed() string { + return ts.fullPath +} + +// Parse calls toml.DecodeFile() func +func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) { + ts.fullPath = findFile(ts.filename, ts.dirNFullPath) + if len(ts.fullPath) < 2 { + return cmd, nil + } + + metadata, err := toml.DecodeFile(ts.fullPath, cmd.Config) + if err != nil { + return nil, err + } + + boolFlags, err := flaeg.GetBoolFlags(cmd.Config) + if err != nil { + return nil, err + } + + flgArgs, hasUnderField, err := generateArgs(metadata, boolFlags) + if err != nil { + return nil, err + } + + err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flgArgs) + if err != nil && err != flaeg.ErrParserNotFound { + return nil, err + } + + if hasUnderField { + _, err := toml.DecodeFile(ts.fullPath, cmd.Config) + if err != nil { + return nil, err + } + } + + return cmd, nil +} + +func preProcessDir(dirIn string) (string, error) { + expanded := os.ExpandEnv(dirIn) + return filepath.Abs(expanded) +} + +func findFile(filename string, dirNFile []string) string { + for _, df := range dirNFile { + if df != "" { + fullPath, _ := preProcessDir(df) + if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() { + return fullPath + } + + fullPath = filepath.Join(fullPath, filename+".toml") + if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() { + return fullPath + } + } + } + return "" +} + +func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) { + var flgArgs []string + keys := metadata.Keys() + hasUnderField := false + + for i, key := range keys { + if metadata.Type(key.String()) == "Hash" { + // TOML hashes correspond to Go structs or maps. 
+ for j := i; j < len(keys); j++ { + if strings.Contains(keys[j].String(), key.String()+".") { + hasUnderField = true + break + } + } + + match := false + for _, flag := range flags { + if flag == strings.ToLower(key.String()) { + match = true + break + } + } + if match { + flgArgs = append(flgArgs, "--"+strings.ToLower(key.String())) + } + } + } + + return flgArgs, hasUnderField, nil +} diff --git a/vendor/github.com/vulcand/oxy/cbreaker/cbreaker.go b/vendor/github.com/vulcand/oxy/cbreaker/cbreaker.go index e7f92f71f..5991a8474 100644 --- a/vendor/github.com/vulcand/oxy/cbreaker/cbreaker.go +++ b/vendor/github.com/vulcand/oxy/cbreaker/cbreaker.go @@ -156,7 +156,7 @@ func (c *CircuitBreaker) activateFallback(w http.ResponseWriter, req *http.Reque func (c *CircuitBreaker) serve(w http.ResponseWriter, req *http.Request) { start := c.clock.UtcNow() - p := utils.NewSimpleProxyWriter(w) + p := utils.NewProxyWriter(w) c.next.ServeHTTP(p, req) diff --git a/vendor/github.com/vulcand/oxy/forward/fwd.go b/vendor/github.com/vulcand/oxy/forward/fwd.go index 668cd920b..abeb3c08e 100644 --- a/vendor/github.com/vulcand/oxy/forward/fwd.go +++ b/vendor/github.com/vulcand/oxy/forward/fwd.go @@ -466,16 +466,6 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct defer logEntry.Debug("vulcand/oxy/forward/http: completed ServeHttp on request") } - var pw utils.ProxyWriter - - // Disable closeNotify when method GET for http pipelining - // Waiting for https://github.com/golang/go/issues/23921 - if inReq.Method == http.MethodGet { - pw = utils.NewProxyWriterWithoutCloseNotify(w) - } else { - pw = utils.NewSimpleProxyWriter(w) - } - start := time.Now().UTC() outReq := new(http.Request) @@ -490,18 +480,24 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct ModifyResponse: f.modifyResponse, BufferPool: f.bufferPool, } - revproxy.ServeHTTP(pw, outReq) - if inReq.TLS != nil { - f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v", - inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start), - inReq.TLS.Version, - inReq.TLS.DidResume, - inReq.TLS.CipherSuite, - inReq.TLS.ServerName) + if f.log.GetLevel() >= log.DebugLevel { + pw := utils.NewProxyWriter(w) + revproxy.ServeHTTP(pw, outReq) + + if inReq.TLS != nil { + f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v", + inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start), + inReq.TLS.Version, + inReq.TLS.DidResume, + inReq.TLS.CipherSuite, + inReq.TLS.ServerName) + } else { + f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v", + inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start)) + } } else { - f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v", - inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start)) + revproxy.ServeHTTP(w, outReq) } } diff --git a/vendor/github.com/vulcand/oxy/roundrobin/rebalancer.go b/vendor/github.com/vulcand/oxy/roundrobin/rebalancer.go index 81f916b74..fec74d26b 100644 --- a/vendor/github.com/vulcand/oxy/roundrobin/rebalancer.go +++ b/vendor/github.com/vulcand/oxy/roundrobin/rebalancer.go @@ -148,7 +148,7 @@ func (rb *Rebalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) { defer 
logEntry.Debug("vulcand/oxy/roundrobin/rebalancer: completed ServeHttp on request") } - pw := utils.NewSimpleProxyWriter(w) + pw := utils.NewProxyWriter(w) start := rb.clock.UtcNow() // make shallow copy of request before changing anything to avoid side effects diff --git a/vendor/github.com/vulcand/oxy/utils/netutils.go b/vendor/github.com/vulcand/oxy/utils/netutils.go index e6e6eb6a4..95c30e7e5 100644 --- a/vendor/github.com/vulcand/oxy/utils/netutils.go +++ b/vendor/github.com/vulcand/oxy/utils/netutils.go @@ -12,89 +12,65 @@ import ( log "github.com/sirupsen/logrus" ) -type ProxyWriter interface { - http.ResponseWriter - GetLength() int64 - StatusCode() int - GetWriter() http.ResponseWriter -} - -// ProxyWriterWithoutCloseNotify helps to capture response headers and status code -// from the ServeHTTP. It can be safely passed to ServeHTTP handler, -// wrapping the real response writer. -type ProxyWriterWithoutCloseNotify struct { +type ProxyWriter struct { W http.ResponseWriter - Code int - Length int64 + code int + length int64 } -func NewProxyWriterWithoutCloseNotify(writer http.ResponseWriter) *ProxyWriterWithoutCloseNotify { - return &ProxyWriterWithoutCloseNotify{ +func NewProxyWriter(writer http.ResponseWriter) *ProxyWriter { + return &ProxyWriter{ W: writer, } } -func NewSimpleProxyWriter(writer http.ResponseWriter) *SimpleProxyWriter { - return &SimpleProxyWriter{ - ProxyWriterWithoutCloseNotify: NewProxyWriterWithoutCloseNotify(writer), - } -} - -type SimpleProxyWriter struct { - *ProxyWriterWithoutCloseNotify -} - -func (p *ProxyWriterWithoutCloseNotify) GetWriter() http.ResponseWriter { - return p.W -} - -func (p *ProxyWriterWithoutCloseNotify) StatusCode() int { - if p.Code == 0 { +func (p *ProxyWriter) StatusCode() int { + if p.code == 0 { // per contract standard lib will set this to http.StatusOK if not set // by user, here we avoid the confusion by mirroring this logic return http.StatusOK } - return p.Code + return p.code } -func (p *ProxyWriterWithoutCloseNotify) Header() http.Header { +func (p *ProxyWriter) GetLength() int64 { + return p.length +} + +func (p *ProxyWriter) Header() http.Header { return p.W.Header() } -func (p *ProxyWriterWithoutCloseNotify) Write(buf []byte) (int, error) { - p.Length = p.Length + int64(len(buf)) +func (p *ProxyWriter) Write(buf []byte) (int, error) { + p.length = p.length + int64(len(buf)) return p.W.Write(buf) } -func (p *ProxyWriterWithoutCloseNotify) WriteHeader(code int) { - p.Code = code +func (p *ProxyWriter) WriteHeader(code int) { + p.code = code p.W.WriteHeader(code) } -func (p *ProxyWriterWithoutCloseNotify) Flush() { +func (p *ProxyWriter) Flush() { if f, ok := p.W.(http.Flusher); ok { f.Flush() } } -func (p *ProxyWriterWithoutCloseNotify) GetLength() int64 { - return p.Length -} - -func (p *SimpleProxyWriter) CloseNotify() <-chan bool { - if cn, ok := p.GetWriter().(http.CloseNotifier); ok { +func (p *ProxyWriter) CloseNotify() <-chan bool { + if cn, ok := p.W.(http.CloseNotifier); ok { return cn.CloseNotify() } - log.Warningf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. Returning dummy channel.", reflect.TypeOf(p.GetWriter())) + log.Debugf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. 
Returning dummy channel.", reflect.TypeOf(p.W)) return make(<-chan bool) } -func (p *ProxyWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) { +func (p *ProxyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { if hi, ok := p.W.(http.Hijacker); ok { return hi.Hijack() } - log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W)) - return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W)) + log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W)) + return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W)) } func NewBufferWriter(w io.WriteCloser) *BufferWriter { @@ -139,8 +115,8 @@ func (b *BufferWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { if hi, ok := b.W.(http.Hijacker); ok { return hi.Hijack() } - log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W)) - return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W)) + log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W)) + return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W)) } type nopWriteCloser struct {
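
For context on the new `middlewares/pipelining` package introduced in this diff, here is a minimal, hypothetical sketch of how it wraps any `http.Handler`: requests using methods other than PUT and POST are served through a writer that hides `http.CloseNotifier`, which works around golang/go#23921 for HTTP pipelining. The backend handler, the listen address, and the `main` wiring below are illustrative; only `NewPipelining` and its wrapping behavior come from the patch.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/containous/traefik/middlewares/pipelining"
)

func main() {
	// Plain backend handler. For GET, HEAD and any other non-PUT/POST method,
	// the writer it receives no longer satisfies http.CloseNotifier; PUT and
	// POST keep the original writer so the proxy can still detect client aborts.
	backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, isCloseNotifier := w.(http.CloseNotifier)
		fmt.Fprintf(w, "method=%s closeNotifier=%v\n", r.Method, isCloseNotifier)
	})

	// Wrap the backend the same way server_configuration.go now wraps the forwarder.
	handler := pipelining.NewPipelining(backend)

	// ":8000" is an arbitrary example address.
	log.Fatal(http.ListenAndServe(":8000", handler))
}
```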
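
The docker provider hunks replace index-based server keys such as `server-test-0` with a name derived from an MD5 hash of the server URL, which is why the test fixtures now expect keys like `server-test-842895ca…` and why `getServers` can skip containers that resolve to an already-seen URL. Below is a standalone sketch of that naming scheme; Traefik's `provider.Normalize` step is deliberately left out, and the container names and URLs are example values.

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// serverName mirrors getServerName from provider/docker/config.go: the key is
// "server-<containerName>-<md5(url)>", deterministic for a given URL instead
// of depending on the container's position in the slice. The real code also
// passes the result through provider.Normalize, omitted here.
func serverName(containerName, url string) string {
	hash := md5.Sum([]byte(url))
	return "server-" + containerName + "-" + hex.EncodeToString(hash[:])
}

func main() {
	// Two containers that expose the same URL map to the same key, which is
	// why getServers logs and skips the duplicate instead of adding it twice.
	fmt.Println(serverName("test", "http://127.0.0.1:80"))
	fmt.Println(serverName("test", "http://127.0.0.1:80"))
	fmt.Println(serverName("test", "http://127.0.0.2:80"))
}
```

Because the key depends only on the container name and the resolved URL, reordering or restarting containers no longer reshuffles server names in the generated configuration.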
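
The staert bump from v3.1.0 to v3.1.1 changes `LoadConfig` to call the new `ListValuedPairWithPrefix` instead of the now-deprecated recursive `ListRecursive`: a single `List` on the prefix, keeping only pairs that carry a non-empty value. Here is a rough sketch of that filtering against a plain map standing in for the libkv store; the keys and values are made up for illustration, and the real helper relies on the store's own prefix listing rather than `strings.HasPrefix`.

```go
package main

import (
	"fmt"
	"strings"
)

// listValuedPairWithPrefix mimics the new staert helper: return every key
// under prefix whose value is non-empty, in one pass, instead of recursing
// into each child key as ListRecursive did.
func listValuedPairWithPrefix(kv map[string][]byte, prefix string) map[string][]byte {
	pairs := make(map[string][]byte)
	for k, v := range kv {
		if strings.HasPrefix(k, prefix) && len(v) > 0 {
			pairs[k] = v
		}
	}
	return pairs
}

func main() {
	// "traefik/entrypoints" has no value: it is a directory-style key that the
	// old recursive walk had to descend into and the new helper simply skips.
	kv := map[string][]byte{
		"traefik/loglevel":                 []byte("DEBUG"),
		"traefik/entrypoints":              nil,
		"traefik/entrypoints/http/address": []byte(":80"),
	}

	for k, v := range listValuedPairWithPrefix(kv, "traefik/") {
		fmt.Printf("%s = %s\n", k, v)
	}
}
```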