Merge branch 'v1.6' into master

Commit df600d6f3c: 18 changed files with 594 additions and 288 deletions
Gopkg.lock (generated, 8 changes)
@@ -255,8 +255,8 @@
 [[projects]]
   name = "github.com/containous/staert"
   packages = ["."]
-  revision = "cc00c303ccbd2491ddc1dccc9eb7ccadd807557e"
-  version = "v3.1.0"
+  revision = "66717a0e0ca950c4b6dc8c87b46da0b8495c6e41"
+  version = "v3.1.1"

 [[projects]]
   name = "github.com/containous/traefik-extra-service-fabric"

@@ -1226,7 +1226,7 @@
     "roundrobin",
     "utils"
   ]
-  revision = "d5b73186eed4aa34b52748699ad19e90f61d4059"
+  revision = "c2414f4542f085363f490048da2fbec5e4537eb6"

 [[projects]]
   name = "github.com/vulcand/predicate"

@@ -1711,6 +1711,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "63fb25f0e549ec7942fda4d11c25e04bdf756dcc44d31e897b103f2270dc42d9"
+  inputs-digest = "593c88b41d6384d68bd610a8c80c39017e77584f4e3454b2ca5c26ee904bf1da"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -62,7 +62,7 @@

 [[constraint]]
   name = "github.com/containous/staert"
-  version = "3.1.0"
+  version = "3.1.1"

 [[constraint]]
   name = "github.com/containous/traefik-extra-service-fabric"
@@ -206,7 +206,7 @@ Here is a list of supported `provider`s, that can automate the DNS verification,
 | [Exoscale](https://www.exoscale.ch) | `exoscale` | `EXOSCALE_API_KEY`, `EXOSCALE_API_SECRET`, `EXOSCALE_ENDPOINT` | YES |
 | [Fast DNS](https://www.akamai.com/) | `fastdns` | `AKAMAI_CLIENT_TOKEN`, `AKAMAI_CLIENT_SECRET`, `AKAMAI_ACCESS_TOKEN` | Not tested yet |
 | [Gandi](https://www.gandi.net) | `gandi` | `GANDI_API_KEY` | Not tested yet |
-| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | Not tested yet |
+| [Gandi V5](http://doc.livedns.gandi.net) | `gandiv5` | `GANDIV5_API_KEY` | YES |
 | [Glesys](https://glesys.com/) | `glesys` | `GLESYS_API_USER`, `GLESYS_API_KEY`, `GLESYS_DOMAIN` | Not tested yet |
 | [GoDaddy](https://godaddy.com/domains) | `godaddy` | `GODADDY_API_KEY`, `GODADDY_API_SECRET` | Not tested yet |
 | [Google Cloud DNS](https://cloud.google.com/dns/docs/) | `gcloud` | `GCE_PROJECT`, `GCE_SERVICE_ACCOUNT_FILE` | YES |
@@ -102,7 +102,7 @@ Let's explain this command:
 | `--mount type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock` | we bind mount the docker socket where Træfik is scheduled to be able to speak to the daemon. |
 | `--network traefik-net` | we attach the Træfik service (and thus the underlying container) to the `traefik-net` network. |
 | `--docker` | enable docker provider, and `--docker.swarmMode` to enable the swarm mode on Træfik. |
-| `--api | activate the webUI on port 8080 |
+| `--api` | activate the webUI on port 8080 |


 ## Deploy your apps
middlewares/pipelining/pipelining.go (new file, 62 lines)
@@ -0,0 +1,62 @@
+package pipelining
+
+import (
+	"bufio"
+	"net"
+	"net/http"
+)
+
+// Pipelining returns a middleware
+type Pipelining struct {
+	next http.Handler
+}
+
+// NewPipelining returns a new Pipelining instance
+func NewPipelining(next http.Handler) *Pipelining {
+	return &Pipelining{
+		next: next,
+	}
+}
+
+func (p *Pipelining) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
+	// https://github.com/golang/go/blob/3d59583836630cf13ec4bfbed977d27b1b7adbdc/src/net/http/server.go#L201-L218
+	if r.Method == http.MethodPut || r.Method == http.MethodPost {
+		p.next.ServeHTTP(rw, r)
+	} else {
+		p.next.ServeHTTP(&writerWithoutCloseNotify{rw}, r)
+	}
+
+}
+
+// writerWithoutCloseNotify helps to disable closeNotify
+type writerWithoutCloseNotify struct {
+	W http.ResponseWriter
+}
+
+// Header returns the response headers.
+func (w *writerWithoutCloseNotify) Header() http.Header {
+	return w.W.Header()
+}
+
+// Write writes the data to the connection as part of an HTTP reply.
+func (w *writerWithoutCloseNotify) Write(buf []byte) (int, error) {
+	return w.W.Write(buf)
+}
+
+// WriteHeader sends an HTTP response header with the provided
+// status code.
+func (w *writerWithoutCloseNotify) WriteHeader(code int) {
+	w.W.WriteHeader(code)
+}
+
+// Flush sends any buffered data to the client.
+func (w *writerWithoutCloseNotify) Flush() {
+	if f, ok := w.W.(http.Flusher); ok {
+		f.Flush()
+	}
+}
+
+// Hijack hijacks the connection.
+func (w *writerWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	return w.W.(http.Hijacker).Hijack()
+}
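A minimal usage sketch of this middleware (illustrative only; the handler and address below are not part of the commit): wrapping any http.Handler with NewPipelining hides http.CloseNotifier from the wrapped handler for every method except PUT and POST, which is what keeps pipelined GET requests from being cancelled prematurely.

package main

import (
	"fmt"
	"net/http"

	"github.com/containous/traefik/middlewares/pipelining"
)

func main() {
	// Stand-in backend handler: it reports whether the ResponseWriter it
	// receives still advertises CloseNotify support.
	backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, hasCloseNotify := w.(http.CloseNotifier)
		fmt.Fprintf(w, "method=%s closeNotify=%v\n", r.Method, hasCloseNotify)
	})

	// GET, HEAD, PROPFIND, ... see a writer without CloseNotify;
	// PUT and POST get the original writer untouched.
	http.ListenAndServe(":8080", pipelining.NewPipelining(backend))
}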
middlewares/pipelining/pipelining_test.go (new file, 69 lines)
@@ -0,0 +1,69 @@
+package pipelining
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type recorderWithCloseNotify struct {
+	*httptest.ResponseRecorder
+}
+
+func (r *recorderWithCloseNotify) CloseNotify() <-chan bool {
+	panic("implement me")
+}
+
+func TestNewPipelining(t *testing.T) {
+	testCases := []struct {
+		desc                   string
+		HTTPMethod             string
+		implementCloseNotifier bool
+	}{
+		{
+			desc:                   "should not implement CloseNotifier with GET method",
+			HTTPMethod:             http.MethodGet,
+			implementCloseNotifier: false,
+		},
+		{
+			desc:                   "should implement CloseNotifier with PUT method",
+			HTTPMethod:             http.MethodPut,
+			implementCloseNotifier: true,
+		},
+		{
+			desc:                   "should implement CloseNotifier with POST method",
+			HTTPMethod:             http.MethodPost,
+			implementCloseNotifier: true,
+		},
+		{
+			desc:                   "should not implement CloseNotifier with GET method",
+			HTTPMethod:             http.MethodHead,
+			implementCloseNotifier: false,
+		},
+		{
+			desc:                   "should not implement CloseNotifier with PROPFIND method",
+			HTTPMethod:             "PROPFIND",
+			implementCloseNotifier: false,
+		},
+	}
+
+	for _, test := range testCases {
+		test := test
+		t.Run(test.desc, func(t *testing.T) {
+			t.Parallel()
+
+			nextHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+				_, ok := w.(http.CloseNotifier)
+				assert.Equal(t, test.implementCloseNotifier, ok)
+				w.WriteHeader(http.StatusOK)
+			})
+			handler := NewPipelining(nextHandler)
+
+			req := httptest.NewRequest(test.HTTPMethod, "http://localhost", nil)
+
+			handler.ServeHTTP(&recorderWithCloseNotify{httptest.NewRecorder()}, req)
+		})
+	}
+}
@@ -2,6 +2,8 @@ package docker
 
 import (
 	"context"
+	"crypto/md5"
+	"encoding/hex"
 	"fmt"
 	"net"
 	"strconv"

@@ -107,13 +109,11 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
 }
 
 func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string {
-	serviceNameKey := container.ServiceName
-	if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); !swarmMode && err == nil {
-		serviceNameKey = values[labelDockerComposeService] + values[labelDockerComposeProject]
+	if swarmMode {
+		return container.ServiceName + segmentName
 	}
 
-	return serviceNameKey + segmentName
+	return getServiceName(container) + segmentName
 }
 
 func (p *Provider) containerFilter(container dockerData) bool {

@@ -170,7 +170,7 @@ func checkSegmentPort(labels map[string]string, segmentName string) error {
 func (p *Provider) getFrontendName(container dockerData, idx int) string {
 	var name string
 	if len(container.SegmentName) > 0 {
-		name = getBackendName(container)
+		name = container.SegmentName + "-" + getBackendName(container)
 	} else {
 		name = p.getFrontendRule(container, container.SegmentLabels) + "-" + strconv.Itoa(idx)
 	}

@@ -262,17 +262,21 @@ func isBackendLBSwarm(container dockerData) bool {
 	return label.GetBoolValue(container.Labels, labelBackendLoadBalancerSwarm, false)
 }
 
-func getSegmentBackendName(container dockerData) string {
-	serviceName := container.ServiceName
-	if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
-		serviceName = provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject])
+func getBackendName(container dockerData) string {
+	if len(container.SegmentName) > 0 {
+		return getSegmentBackendName(container)
 	}
 
+	return getDefaultBackendName(container)
+}
+
+func getSegmentBackendName(container dockerData) string {
+	serviceName := getServiceName(container)
 	if value := label.GetStringValue(container.SegmentLabels, label.TraefikBackend, ""); len(value) > 0 {
 		return provider.Normalize(serviceName + "-" + value)
 	}
 
-	return provider.Normalize(serviceName + "-" + getDefaultBackendName(container) + "-" + container.SegmentName)
+	return provider.Normalize(serviceName + "-" + container.SegmentName)
 }
 
 func getDefaultBackendName(container dockerData) string {

@@ -280,19 +284,17 @@ func getDefaultBackendName(container dockerData) string {
 		return provider.Normalize(value)
 	}
 
-	if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
-		return provider.Normalize(values[labelDockerComposeService] + "_" + values[labelDockerComposeProject])
-	}
-
-	return provider.Normalize(container.ServiceName)
+	return provider.Normalize(getServiceName(container))
 }
 
-func getBackendName(container dockerData) string {
-	if len(container.SegmentName) > 0 {
-		return getSegmentBackendName(container)
+func getServiceName(container dockerData) string {
+	serviceName := container.ServiceName
+
+	if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); err == nil {
+		serviceName = values[labelDockerComposeService] + "_" + values[labelDockerComposeProject]
 	}
 
-	return getDefaultBackendName(container)
+	return serviceName
 }
 
 func getPort(container dockerData) string {

@@ -322,7 +324,7 @@ func getPort(container dockerData) string {
 func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
 	var servers map[string]types.Server
 
-	for i, container := range containers {
+	for _, container := range containers {
 		ip := p.getIPAddress(container)
 		if len(ip) == 0 {
 			log.Warnf("Unable to find the IP address for the container %q: the server is ignored.", container.Name)

@@ -336,16 +338,30 @@ func (p *Provider) getServers(containers []dockerData) map[string]types.Server {
 		protocol := label.GetStringValue(container.SegmentLabels, label.TraefikProtocol, label.DefaultProtocol)
 		port := getPort(container)
 
-		serverName := "server-" + container.SegmentName + "-" + container.Name
-		if len(container.SegmentName) > 0 {
-			serverName += "-" + strconv.Itoa(i)
+		serverURL := fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port))
+
+		serverName := getServerName(container.Name, serverURL)
+		if _, exist := servers[serverName]; exist {
+			log.Debugf("Skipping server %q with the same URL.", serverName)
+			continue
 		}
 
-		servers[provider.Normalize(serverName)] = types.Server{
-			URL:    fmt.Sprintf("%s://%s", protocol, net.JoinHostPort(ip, port)),
+		servers[serverName] = types.Server{
+			URL:    serverURL,
 			Weight: label.GetIntValue(container.SegmentLabels, label.TraefikWeight, label.DefaultWeight),
 		}
 	}
 
 	return servers
 }
+
+func getServerName(containerName, url string) string {
+	hash := md5.New()
+	_, err := hash.Write([]byte(url))
+	if err != nil {
+		// Impossible case
+		log.Errorf("Fail to hash server URL %q", url)
+	}
+
+	return provider.Normalize("server-" + containerName + "-" + hex.EncodeToString(hash.Sum(nil)))
+}
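The behavioural change in getServers above is that server keys are no longer built from a positional index but from an MD5 hash of the server URL, so two containers that resolve to the same URL collapse into a single, skipped entry. A stand-alone sketch of that naming scheme (the function below mirrors getServerName for illustration only; the normalization step is a simplified stand-in for provider.Normalize):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"strings"
)

// serverName builds "server-<container>-<md5(url)>"; identical URLs yield
// identical keys, which is what lets duplicate servers be de-duplicated.
func serverName(containerName, url string) string {
	sum := md5.Sum([]byte(url))
	name := "server-" + containerName + "-" + hex.EncodeToString(sum[:])
	// Simplified replacement for provider.Normalize.
	return strings.NewReplacer("/", "-", ".", "-").Replace(name)
}

func main() {
	fmt.Println(serverName("test", "http://127.0.0.1:80"))
	fmt.Println(serverName("test", "http://127.0.0.1:80")) // same URL, same key
	fmt.Println(serverName("test", "http://127.0.0.2:80")) // different URL, different key
}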
@@ -55,7 +55,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-test": {
 					Servers: map[string]types.Server{
-						"server-test": {
+						"server-test-842895ca2aca17f6ee36ddb2f621194d": {
 							URL:    "http://127.0.0.1:80",
 							Weight: label.DefaultWeight,
 						},

@@ -91,7 +91,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-test": {
 					Servers: map[string]types.Server{
-						"server-test": {
+						"server-test-48093b9fc43454203aacd2bc4057a08c": {
 							URL:    "http://127.0.0.2:80",
 							Weight: label.DefaultWeight,
 						},

@@ -132,7 +132,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-test": {
 					Servers: map[string]types.Server{
-						"server-test": {
+						"server-test-405767e9733427148cd8dae6c4d331b0": {
 							URL:    "http://127.0.0.3:80",
 							Weight: label.DefaultWeight,
 						},

@@ -352,7 +352,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-foobar": {
 					Servers: map[string]types.Server{
-						"server-test1": {
+						"server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": {
 							URL:    "https://127.0.0.1:666",
 							Weight: 12,
 						},

@@ -460,10 +460,11 @@ func TestDockerBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-myService-myProject": {
 					Servers: map[string]types.Server{
-						"server-test-0": {
+						"server-test-0-842895ca2aca17f6ee36ddb2f621194d": {
 							URL:    "http://127.0.0.1:80",
 							Weight: label.DefaultWeight,
-						}, "server-test-1": {
+						},
+						"server-test-1-48093b9fc43454203aacd2bc4057a08c": {
 							URL:    "http://127.0.0.2:80",
 							Weight: label.DefaultWeight,
 						},

@@ -472,7 +473,7 @@ func TestDockerBuildConfiguration(t *testing.T) {
 				},
 				"backend-myService2-myProject": {
 					Servers: map[string]types.Server{
-						"server-test-2": {
+						"server-test-2-405767e9733427148cd8dae6c4d331b0": {
 							URL:    "http://127.0.0.3:80",
 							Weight: label.DefaultWeight,
 						},

@@ -1164,7 +1165,7 @@ func TestDockerGetServers(t *testing.T) {
 				})),
 			},
 			expected: map[string]types.Server{
-				"server-test1": {
+				"server-test1-fb00f762970935200c76ccdaf91458f6": {
 					URL:    "http://10.10.10.10:80",
 					Weight: 1,
 				},

@@ -1193,15 +1194,15 @@ func TestDockerGetServers(t *testing.T) {
 				})),
 			},
 			expected: map[string]types.Server{
-				"server-test1": {
+				"server-test1-743440b6f4a8ffd8737626215f2c5a33": {
 					URL:    "http://10.10.10.11:80",
 					Weight: 1,
 				},
-				"server-test2": {
+				"server-test2-547f74bbb5da02b6c8141ce9aa96c13b": {
 					URL:    "http://10.10.10.12:81",
 					Weight: 1,
 				},
-				"server-test3": {
+				"server-test3-c57fd8b848c814a3f2a4a4c12e13c179": {
 					URL:    "http://10.10.10.13:82",
 					Weight: 1,
 				},

@@ -1230,11 +1231,11 @@ func TestDockerGetServers(t *testing.T) {
 				})),
 			},
 			expected: map[string]types.Server{
-				"server-test2": {
+				"server-test2-547f74bbb5da02b6c8141ce9aa96c13b": {
 					URL:    "http://10.10.10.12:81",
 					Weight: 1,
 				},
-				"server-test3": {
+				"server-test3-c57fd8b848c814a3f2a4a4c12e13c179": {
 					URL:    "http://10.10.10.13:82",
 					Weight: 1,
 				},
@@ -57,7 +57,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-test": {
 					Servers: map[string]types.Server{
-						"server-test": {
+						"server-test-842895ca2aca17f6ee36ddb2f621194d": {
 							URL:    "http://127.0.0.1:80",
 							Weight: label.DefaultWeight,
 						},

@@ -243,7 +243,6 @@ func TestSwarmBuildConfiguration(t *testing.T) {
 						ReferrerPolicy:       "foo",
 						IsDevelopment:        true,
 					},
-
 					Errors: map[string]*types.ErrorPage{
 						"foo": {
 							Status: []string{"404"},

@@ -281,7 +280,7 @@ func TestSwarmBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-foobar": {
 					Servers: map[string]types.Server{
-						"server-test1": {
+						"server-test1-7f6444e0dff3330c8b0ad2bbbd383b0f": {
 							URL:    "https://127.0.0.1:666",
 							Weight: 12,
 						},
@@ -42,22 +42,22 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 				),
 			},
 			expectedFrontends: map[string]*types.Frontend{
-				"frontend-foo-foo-sauternes": {
-					Backend:        "backend-foo-foo-sauternes",
+				"frontend-sauternes-foo-sauternes": {
+					Backend:        "backend-foo-sauternes",
 					PassHostHeader: true,
 					EntryPoints:    []string{"http", "https"},
 					BasicAuth:      []string{},
 					Routes: map[string]types.Route{
-						"route-frontend-foo-foo-sauternes": {
+						"route-frontend-sauternes-foo-sauternes": {
 							Rule: "Host:foo.docker.localhost",
 						},
 					},
 				},
 			},
 			expectedBackends: map[string]*types.Backend{
-				"backend-foo-foo-sauternes": {
+				"backend-foo-sauternes": {
 					Servers: map[string]types.Server{
-						"server-sauternes-foo-0": {
+						"server-foo-863563a2e23c95502862016417ee95ea": {
 							URL:    "http://127.0.0.1:2503",
 							Weight: label.DefaultWeight,
 						},

@@ -133,8 +133,8 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 				),
 			},
 			expectedFrontends: map[string]*types.Frontend{
-				"frontend-foo-foo-sauternes": {
-					Backend: "backend-foo-foo-sauternes",
+				"frontend-sauternes-foo-sauternes": {
+					Backend: "backend-foo-sauternes",
 					EntryPoints: []string{
 						"http",
 						"https",

@@ -226,16 +226,16 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 					},
 
 					Routes: map[string]types.Route{
-						"route-frontend-foo-foo-sauternes": {
+						"route-frontend-sauternes-foo-sauternes": {
 							Rule: "Host:foo.docker.localhost",
 						},
 					},
 				},
 			},
 			expectedBackends: map[string]*types.Backend{
-				"backend-foo-foo-sauternes": {
+				"backend-foo-sauternes": {
 					Servers: map[string]types.Server{
-						"server-sauternes-foo-0": {
+						"server-foo-7f6444e0dff3330c8b0ad2bbbd383b0f": {
 							URL:    "https://127.0.0.1:666",
 							Weight: 12,
 						},

@@ -280,7 +280,7 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 				),
 			},
 			expectedFrontends: map[string]*types.Frontend{
-				"frontend-test1-foobar": {
+				"frontend-sauternes-test1-foobar": {
 					Backend:        "backend-test1-foobar",
 					PassHostHeader: false,
 					Priority:       5000,

@@ -290,18 +290,18 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 						EntryPoint: "https",
 					},
 					Routes: map[string]types.Route{
-						"route-frontend-test1-foobar": {
+						"route-frontend-sauternes-test1-foobar": {
 							Rule: "Path:/mypath",
 						},
 					},
 				},
-				"frontend-test2-test2-anothersauternes": {
-					Backend:        "backend-test2-test2-anothersauternes",
+				"frontend-anothersauternes-test2-anothersauternes": {
+					Backend:        "backend-test2-anothersauternes",
 					PassHostHeader: true,
 					EntryPoints:    []string{},
 					BasicAuth:      []string{},
 					Routes: map[string]types.Route{
-						"route-frontend-test2-test2-anothersauternes": {
+						"route-frontend-anothersauternes-test2-anothersauternes": {
 							Rule: "Path:/anotherpath",
 						},
 					},

@@ -310,16 +310,16 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 			expectedBackends: map[string]*types.Backend{
 				"backend-test1-foobar": {
 					Servers: map[string]types.Server{
-						"server-sauternes-test1-0": {
+						"server-test1-79533a101142718f0fdf84c42593c41e": {
 							URL:    "https://127.0.0.1:2503",
 							Weight: 80,
 						},
 					},
 					CircuitBreaker: nil,
 				},
-				"backend-test2-test2-anothersauternes": {
+				"backend-test2-anothersauternes": {
 					Servers: map[string]types.Server{
-						"server-anothersauternes-test2-0": {
+						"server-test2-e9c1b66f9af919aa46053fbc2391bb4a": {
 							URL:    "http://127.0.0.1:8079",
 							Weight: 33,
 						},

@@ -328,6 +328,152 @@ func TestSegmentBuildConfiguration(t *testing.T) {
 				},
 			},
 		},
+		{
+			desc: "several segments with the same backend name and same port",
+			containers: []docker.ContainerJSON{
+				containerJSON(
+					name("test1"),
+					labels(map[string]string{
+						"traefik.port":     "2503",
+						"traefik.protocol": "https",
+						"traefik.weight":   "80",
+						"traefik.frontend.entryPoints":         "http,https",
+						"traefik.frontend.redirect.entryPoint": "https",
+
+						"traefik.sauternes.backend":           "foobar",
+						"traefik.sauternes.frontend.rule":     "Path:/sauternes",
+						"traefik.sauternes.frontend.priority": "5000",
+
+						"traefik.arbois.backend":           "foobar",
+						"traefik.arbois.frontend.rule":     "Path:/arbois",
+						"traefik.arbois.frontend.priority": "3000",
+					}),
+					ports(nat.PortMap{
+						"80/tcp": {},
+					}),
+					withNetwork("bridge", ipv4("127.0.0.1")),
+				),
+			},
+			expectedFrontends: map[string]*types.Frontend{
+				"frontend-sauternes-test1-foobar": {
+					Backend:        "backend-test1-foobar",
+					PassHostHeader: true,
+					Priority:       5000,
+					EntryPoints:    []string{"http", "https"},
+					BasicAuth:      []string{},
+					Redirect: &types.Redirect{
+						EntryPoint: "https",
+					},
+					Routes: map[string]types.Route{
+						"route-frontend-sauternes-test1-foobar": {
+							Rule: "Path:/sauternes",
+						},
+					},
+				},
+				"frontend-arbois-test1-foobar": {
+					Backend:        "backend-test1-foobar",
+					PassHostHeader: true,
+					Priority:       3000,
+					EntryPoints:    []string{"http", "https"},
+					BasicAuth:      []string{},
+					Redirect: &types.Redirect{
+						EntryPoint: "https",
+					},
+					Routes: map[string]types.Route{
+						"route-frontend-arbois-test1-foobar": {
+							Rule: "Path:/arbois",
+						},
+					},
+				},
+			},
+			expectedBackends: map[string]*types.Backend{
+				"backend-test1-foobar": {
+					Servers: map[string]types.Server{
+						"server-test1-79533a101142718f0fdf84c42593c41e": {
+							URL:    "https://127.0.0.1:2503",
+							Weight: 80,
+						},
+					},
+					CircuitBreaker: nil,
+				},
+			},
+		},
+		{
+			desc: "several segments with the same backend name and different port (wrong behavior)",
+			containers: []docker.ContainerJSON{
+				containerJSON(
+					name("test1"),
+					labels(map[string]string{
+						"traefik.protocol":                     "https",
+						"traefik.frontend.entryPoints":         "http,https",
+						"traefik.frontend.redirect.entryPoint": "https",
+
+						"traefik.sauternes.port":              "2503",
+						"traefik.sauternes.weight":            "80",
+						"traefik.sauternes.backend":           "foobar",
+						"traefik.sauternes.frontend.rule":     "Path:/sauternes",
+						"traefik.sauternes.frontend.priority": "5000",
+
+						"traefik.arbois.port":              "2504",
+						"traefik.arbois.weight":            "90",
+						"traefik.arbois.backend":           "foobar",
+						"traefik.arbois.frontend.rule":     "Path:/arbois",
+						"traefik.arbois.frontend.priority": "3000",
+					}),
+					ports(nat.PortMap{
+						"80/tcp": {},
+					}),
+					withNetwork("bridge", ipv4("127.0.0.1")),
+				),
+			},
+			expectedFrontends: map[string]*types.Frontend{
+				"frontend-sauternes-test1-foobar": {
+					Backend:        "backend-test1-foobar",
+					PassHostHeader: true,
+					Priority:       5000,
+					EntryPoints:    []string{"http", "https"},
+					BasicAuth:      []string{},
+					Redirect: &types.Redirect{
+						EntryPoint: "https",
+					},
+					Routes: map[string]types.Route{
+						"route-frontend-sauternes-test1-foobar": {
+							Rule: "Path:/sauternes",
+						},
+					},
+				},
+				"frontend-arbois-test1-foobar": {
+					Backend:        "backend-test1-foobar",
+					PassHostHeader: true,
+					Priority:       3000,
+					EntryPoints:    []string{"http", "https"},
+					BasicAuth:      []string{},
+					Redirect: &types.Redirect{
+						EntryPoint: "https",
+					},
+					Routes: map[string]types.Route{
+						"route-frontend-arbois-test1-foobar": {
+							Rule: "Path:/arbois",
+						},
+					},
+				},
+			},
+			expectedBackends: map[string]*types.Backend{
+				"backend-test1-foobar": {
+					Servers: map[string]types.Server{
+						"server-test1-79533a101142718f0fdf84c42593c41e": {
+							URL:    "https://127.0.0.1:2503",
+							Weight: 80,
+						},
+						"server-test1-315a41140f1bd825b066e39686c18482": {
+							URL:    "https://127.0.0.1:2504",
+							Weight: 90,
+						},
+					},
+					CircuitBreaker: nil,
+				},
+			},
+		},
 	}
 
 	provider := &Provider{
@@ -16,6 +16,7 @@ import (
 	"github.com/containous/traefik/log"
 	"github.com/containous/traefik/metrics"
 	"github.com/containous/traefik/middlewares"
+	"github.com/containous/traefik/middlewares/pipelining"
 	"github.com/containous/traefik/rules"
 	"github.com/containous/traefik/safe"
 	traefiktls "github.com/containous/traefik/tls"

@@ -254,6 +255,8 @@ func (s *Server) buildForwarder(entryPointName string, entryPoint *configuration
 		})
 	}
 
+	fwd = pipelining.NewPipelining(fwd)
+
 	return fwd, nil
 }
 
vendor/github.com/containous/staert/kv.go (generated, vendored, 45 changes)
@@ -46,16 +46,16 @@ func (kv *KvSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
 
 // LoadConfig loads data from the KV Store into the config structure (given by reference)
 func (kv *KvSource) LoadConfig(config interface{}) error {
-	pairs := map[string][]byte{}
-	if err := kv.ListRecursive(kv.Prefix, pairs); err != nil {
+	pairs, err := kv.ListValuedPairWithPrefix(kv.Prefix)
+	if err != nil {
 		return err
 	}
-	// fmt.Printf("pairs : %#v\n", pairs)
+
 	mapStruct, err := generateMapstructure(convertPairs(pairs), kv.Prefix)
 	if err != nil {
 		return err
 	}
-	// fmt.Printf("mapStruct : %#v\n", mapStruct)
+
 	configDecoder := &mapstructure.DecoderConfig{
 		Metadata: nil,
 		Result:   config,

@@ -77,11 +77,11 @@ func generateMapstructure(pairs []*store.KVPair, prefix string) (map[string]interface{}, error) {
 	for _, p := range pairs {
 		// Trim the prefix off our key first
 		key := strings.TrimPrefix(strings.Trim(p.Key, "/"), strings.Trim(prefix, "/")+"/")
-		raw, err := processKV(key, p.Value, raw)
+
+		var err error
+		raw, err = processKV(key, p.Value, raw)
 		if err != nil {
 			return raw, err
 		}
-
 	}
 	return raw, nil
 }

@@ -313,15 +313,23 @@ func collateKvRecursive(objValue reflect.Value, kv map[string]string, key string) error {
 func writeCompressedData(data []byte) (string, error) {
 	var buffer bytes.Buffer
 	gzipWriter := gzip.NewWriter(&buffer)
 
 	_, err := gzipWriter.Write(data)
 	if err != nil {
 		return "", err
 	}
-	gzipWriter.Close()
+
+	err = gzipWriter.Close()
+	if err != nil {
+		return "", err
+	}
+
 	return buffer.String(), nil
 }
 
 // ListRecursive lists all key value children under key
+// Replaced by ListValuedPairWithPrefix
+// Deprecated
 func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
 	pairsN1, err := kv.List(key, nil)
 	if err == store.ErrKeyNotFound {

@@ -342,14 +350,37 @@ func (kv *KvSource) ListRecursive(key string, pairs map[string][]byte) error {
 		return nil
 	}
 	for _, p := range pairsN1 {
+		if p.Key != key {
 			err := kv.ListRecursive(p.Key, pairs)
 			if err != nil {
 				return err
 			}
 		}
+	}
 	return nil
 }
 
+// ListValuedPairWithPrefix lists all key value children under key
+func (kv *KvSource) ListValuedPairWithPrefix(key string) (map[string][]byte, error) {
+	pairs := make(map[string][]byte)
+
+	pairsN1, err := kv.List(key, nil)
+	if err == store.ErrKeyNotFound {
+		return pairs, nil
+	}
+	if err != nil {
+		return pairs, err
+	}
+
+	for _, p := range pairsN1 {
+		if len(p.Value) > 0 {
+			pairs[p.Key] = p.Value
+		}
+	}
+
+	return pairs, nil
+}
+
 func convertPairs(pairs map[string][]byte) []*store.KVPair {
 	slicePairs := make([]*store.KVPair, len(pairs))
 	i := 0
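The LoadConfig change above swaps the recursive ListRecursive walk for a single prefixed List call whose results are filtered down to valued pairs. A stand-alone sketch of that filtering step (kvPair is a placeholder for libkv's store.KVPair, and the keys below are invented examples, not part of the commit):

package main

import "fmt"

// kvPair stands in for libkv's store.KVPair; only the fields the filter
// needs are modelled here.
type kvPair struct {
	Key   string
	Value []byte
}

// valuedPairs mirrors what ListValuedPairWithPrefix does with the result of
// one prefixed List call: directory nodes (empty values) are dropped, so only
// leaf values reach the mapstructure decoding step.
func valuedPairs(pairs []kvPair) map[string][]byte {
	out := make(map[string][]byte)
	for _, p := range pairs {
		if len(p.Value) > 0 {
			out[p.Key] = p.Value
		}
	}
	return out
}

func main() {
	pairs := []kvPair{
		{Key: "traefik/loglevel", Value: []byte("DEBUG")},
		{Key: "traefik/entrypoints", Value: nil}, // directory node, skipped
		{Key: "traefik/entrypoints/http/address", Value: []byte(":80")},
	}
	fmt.Println(valuedPairs(pairs))
}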
vendor/github.com/containous/staert/staert.go (generated, vendored, 164 changes)
@@ -2,12 +2,8 @@ package staert
 
 import (
 	"fmt"
-	"os"
-	"path/filepath"
 	"reflect"
-	"strings"
 
-	"github.com/BurntSushi/toml"
 	"github.com/containous/flaeg"
 )
 

@@ -24,10 +20,7 @@ type Staert struct {
 
 // NewStaert creates and return a pointer on Staert. Need defaultConfig and defaultPointersConfig given by references
 func NewStaert(rootCommand *flaeg.Command) *Staert {
-	s := Staert{
-		command: rootCommand,
-	}
-	return &s
+	return &Staert{command: rootCommand}
 }
 
 // AddSource adds new Source to Staert, give it by reference

@@ -35,40 +28,31 @@ func (s *Staert) AddSource(src Source) {
 	s.sources = append(s.sources, src)
 }
 
-// getConfig for a flaeg.Command run sources Parse func in the raw
-func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error {
-	for _, src := range s.sources {
-		var err error
-		_, err = src.Parse(cmd)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 // LoadConfig check which command is called and parses config
 // It returns the the parsed config or an error if it fails
 func (s *Staert) LoadConfig() (interface{}, error) {
 	for _, src := range s.sources {
-		//Type assertion
-		f, ok := src.(*flaeg.Flaeg)
-		if ok {
-			if fCmd, err := f.GetCommand(); err != nil {
+		// Type assertion
+		if flg, ok := src.(*flaeg.Flaeg); ok {
+			fCmd, err := flg.GetCommand()
+			if err != nil {
 				return nil, err
-			} else if s.command != fCmd {
-				//IF fleag sub-command
+			}
+
+			// if fleag sub-command
+			if s.command != fCmd {
+				// if parseAllSources
 				if fCmd.Metadata["parseAllSources"] == "true" {
-					//IF parseAllSources
 					fCmdConfigType := reflect.TypeOf(fCmd.Config)
 					sCmdConfigType := reflect.TypeOf(s.command.Config)
 					if fCmdConfigType != sCmdConfigType {
-						return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s", fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
+						return nil, fmt.Errorf("command %s : Config type doesn't match with root command config type. Expected %s got %s",
+							fCmd.Name, sCmdConfigType.Name(), fCmdConfigType.Name())
 					}
 					s.command = fCmd
 				} else {
-					// ELSE (not parseAllSources)
-					s.command, err = f.Parse(fCmd)
+					// (not parseAllSources)
+					s.command, err = flg.Parse(fCmd)
 					return s.command.Config, err
 				}
 			}

@@ -78,117 +62,19 @@ func (s *Staert) LoadConfig() (interface{}, error) {
 	return s.command.Config, err
 }
 
+// parseConfigAllSources getConfig for a flaeg.Command run sources Parse func in the raw
+func (s *Staert) parseConfigAllSources(cmd *flaeg.Command) error {
+	for _, src := range s.sources {
+		_, err := src.Parse(cmd)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // Run calls the Run func of the command
 // Warning, Run doesn't parse the config
 func (s *Staert) Run() error {
 	return s.command.Run()
 }
-
-//TomlSource impement Source
-type TomlSource struct {
-	filename     string
-	dirNfullpath []string
-	fullpath     string
-}
-
-// NewTomlSource creates and return a pointer on TomlSource.
-// Parameter filename is the file name (without extension type, ".toml" will be added)
-// dirNfullpath may contain directories or fullpath to the file.
-func NewTomlSource(filename string, dirNfullpath []string) *TomlSource {
-	return &TomlSource{filename, dirNfullpath, ""}
-}
-
-// ConfigFileUsed return config file used
-func (ts *TomlSource) ConfigFileUsed() string {
-	return ts.fullpath
-}
-
-func preprocessDir(dirIn string) (string, error) {
-	dirOut := dirIn
-	expanded := os.ExpandEnv(dirIn)
-	dirOut, err := filepath.Abs(expanded)
-	return dirOut, err
-}
-
-func findFile(filename string, dirNfile []string) string {
-	for _, df := range dirNfile {
-		if df != "" {
-			fullPath, _ := preprocessDir(df)
-			if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
-				return fullPath
-			}
-
-			fullPath = filepath.Join(fullPath, filename+".toml")
-			if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
-				return fullPath
-			}
-		}
-	}
-	return ""
-}
-
-// Parse calls toml.DecodeFile() func
-func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
-	ts.fullpath = findFile(ts.filename, ts.dirNfullpath)
-	if len(ts.fullpath) < 2 {
-		return cmd, nil
-	}
-	metadata, err := toml.DecodeFile(ts.fullpath, cmd.Config)
-	if err != nil {
-		return nil, err
-	}
-	boolFlags, err := flaeg.GetBoolFlags(cmd.Config)
-	if err != nil {
-		return nil, err
-	}
-	flaegArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
-	if err != nil {
-		return nil, err
-	}
-
-	// fmt.Println(flaegArgs)
-	err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flaegArgs)
-	//if err!= missing parser err
-	if err != nil && err != flaeg.ErrParserNotFound {
-		return nil, err
-	}
-	if hasUnderField {
-		_, err := toml.DecodeFile(ts.fullpath, cmd.Config)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return cmd, nil
-}
-
-func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
-	var flaegArgs []string
-	keys := metadata.Keys()
-	hasUnderField := false
-	for i, key := range keys {
-		// fmt.Println(key)
-		if metadata.Type(key.String()) == "Hash" {
-			// TOML hashes correspond to Go structs or maps.
-			// fmt.Printf("%s could be a ptr on a struct, or a map\n", key)
-			for j := i; j < len(keys); j++ {
-				// fmt.Printf("%s =? %s\n", keys[j].String(), "."+key.String())
-				if strings.Contains(keys[j].String(), key.String()+".") {
-					hasUnderField = true
-					break
-				}
-			}
-			match := false
-			for _, flag := range flags {
-				if flag == strings.ToLower(key.String()) {
-					match = true
-					break
-				}
-			}
-			if match {
-				flaegArgs = append(flaegArgs, "--"+strings.ToLower(key.String()))
-			}
-		}
-	}
-	return flaegArgs, hasUnderField, nil
-}
vendor/github.com/containous/staert/toml.go (generated, vendored, new file, 121 lines)
@@ -0,0 +1,121 @@
+package staert
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containous/flaeg"
+)
+
+var _ Source = (*TomlSource)(nil)
+
+// TomlSource implement staert.Source
+type TomlSource struct {
+	filename     string
+	dirNFullPath []string
+	fullPath     string
+}
+
+// NewTomlSource creates and return a pointer on Source.
+// Parameter filename is the file name (without extension type, ".toml" will be added)
+// dirNFullPath may contain directories or fullPath to the file.
+func NewTomlSource(filename string, dirNFullPath []string) *TomlSource {
+	return &TomlSource{filename, dirNFullPath, ""}
+}
+
+// ConfigFileUsed return config file used
+func (ts *TomlSource) ConfigFileUsed() string {
+	return ts.fullPath
+}
+
+// Parse calls toml.DecodeFile() func
+func (ts *TomlSource) Parse(cmd *flaeg.Command) (*flaeg.Command, error) {
+	ts.fullPath = findFile(ts.filename, ts.dirNFullPath)
+	if len(ts.fullPath) < 2 {
+		return cmd, nil
+	}
+
+	metadata, err := toml.DecodeFile(ts.fullPath, cmd.Config)
+	if err != nil {
+		return nil, err
+	}
+
+	boolFlags, err := flaeg.GetBoolFlags(cmd.Config)
+	if err != nil {
+		return nil, err
+	}
+
+	flgArgs, hasUnderField, err := generateArgs(metadata, boolFlags)
+	if err != nil {
+		return nil, err
+	}
+
+	err = flaeg.Load(cmd.Config, cmd.DefaultPointersConfig, flgArgs)
+	if err != nil && err != flaeg.ErrParserNotFound {
+		return nil, err
+	}
+
+	if hasUnderField {
+		_, err := toml.DecodeFile(ts.fullPath, cmd.Config)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cmd, nil
+}
+
+func preProcessDir(dirIn string) (string, error) {
+	expanded := os.ExpandEnv(dirIn)
+	return filepath.Abs(expanded)
+}
+
+func findFile(filename string, dirNFile []string) string {
+	for _, df := range dirNFile {
+		if df != "" {
+			fullPath, _ := preProcessDir(df)
+			if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
+				return fullPath
+			}
+
+			fullPath = filepath.Join(fullPath, filename+".toml")
+			if fileInfo, err := os.Stat(fullPath); err == nil && !fileInfo.IsDir() {
+				return fullPath
+			}
+		}
+	}
+	return ""
+}
+
+func generateArgs(metadata toml.MetaData, flags []string) ([]string, bool, error) {
+	var flgArgs []string
+	keys := metadata.Keys()
+	hasUnderField := false
+
+	for i, key := range keys {
+		if metadata.Type(key.String()) == "Hash" {
+			// TOML hashes correspond to Go structs or maps.
+			for j := i; j < len(keys); j++ {
+				if strings.Contains(keys[j].String(), key.String()+".") {
+					hasUnderField = true
+					break
+				}
+			}
+
+			match := false
+			for _, flag := range flags {
+				if flag == strings.ToLower(key.String()) {
+					match = true
+					break
+				}
+			}
+			if match {
+				flgArgs = append(flgArgs, "--"+strings.ToLower(key.String()))
+			}
+		}
+	}
+
+	return flgArgs, hasUnderField, nil
+}
vendor/github.com/vulcand/oxy/cbreaker/cbreaker.go (generated, vendored, 2 changes)
@@ -156,7 +156,7 @@ func (c *CircuitBreaker) activateFallback(w http.ResponseWriter, req *http.Reque
 
 func (c *CircuitBreaker) serve(w http.ResponseWriter, req *http.Request) {
 	start := c.clock.UtcNow()
-	p := utils.NewSimpleProxyWriter(w)
+	p := utils.NewProxyWriter(w)
 
 	c.next.ServeHTTP(p, req)
 
vendor/github.com/vulcand/oxy/forward/fwd.go (generated, vendored, 16 changes)
@@ -466,16 +466,6 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct
 		defer logEntry.Debug("vulcand/oxy/forward/http: completed ServeHttp on request")
 	}
 
-	var pw utils.ProxyWriter
-
-	// Disable closeNotify when method GET for http pipelining
-	// Waiting for https://github.com/golang/go/issues/23921
-	if inReq.Method == http.MethodGet {
-		pw = utils.NewProxyWriterWithoutCloseNotify(w)
-	} else {
-		pw = utils.NewSimpleProxyWriter(w)
-	}
-
 	start := time.Now().UTC()
 
 	outReq := new(http.Request)

@@ -490,6 +480,9 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct
 		ModifyResponse: f.modifyResponse,
 		BufferPool:     f.bufferPool,
 	}
 
+	if f.log.GetLevel() >= log.DebugLevel {
+		pw := utils.NewProxyWriter(w)
 		revproxy.ServeHTTP(pw, outReq)
 
 		if inReq.TLS != nil {

@@ -503,6 +496,9 @@ func (f *httpForwarder) serveHTTP(w http.ResponseWriter, inReq *http.Request, ct
 			f.log.Debugf("vulcand/oxy/forward/http: Round trip: %v, code: %v, Length: %v, duration: %v",
 				inReq.URL, pw.StatusCode(), pw.GetLength(), time.Now().UTC().Sub(start))
 		}
+	} else {
+		revproxy.ServeHTTP(w, outReq)
+	}
 }
 
 // isWebsocketRequest determines if the specified HTTP request is a
vendor/github.com/vulcand/oxy/roundrobin/rebalancer.go (generated, vendored, 2 changes)
@@ -148,7 +148,7 @@ func (rb *Rebalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		defer logEntry.Debug("vulcand/oxy/roundrobin/rebalancer: completed ServeHttp on request")
 	}
 
-	pw := utils.NewSimpleProxyWriter(w)
+	pw := utils.NewProxyWriter(w)
 	start := rb.clock.UtcNow()
 
 	// make shallow copy of request before changing anything to avoid side effects
vendor/github.com/vulcand/oxy/utils/netutils.go (generated, vendored, 76 changes)
@@ -12,89 +12,65 @@ import (
 	log "github.com/sirupsen/logrus"
 )
 
-type ProxyWriter interface {
-	http.ResponseWriter
-	GetLength() int64
-	StatusCode() int
-	GetWriter() http.ResponseWriter
-}
-
-// ProxyWriterWithoutCloseNotify helps to capture response headers and status code
-// from the ServeHTTP. It can be safely passed to ServeHTTP handler,
-// wrapping the real response writer.
-type ProxyWriterWithoutCloseNotify struct {
+type ProxyWriter struct {
 	W      http.ResponseWriter
-	Code   int
-	Length int64
+	code   int
+	length int64
 }
 
-func NewProxyWriterWithoutCloseNotify(writer http.ResponseWriter) *ProxyWriterWithoutCloseNotify {
-	return &ProxyWriterWithoutCloseNotify{
+func NewProxyWriter(writer http.ResponseWriter) *ProxyWriter {
+	return &ProxyWriter{
 		W: writer,
 	}
 }
 
-func NewSimpleProxyWriter(writer http.ResponseWriter) *SimpleProxyWriter {
-	return &SimpleProxyWriter{
-		ProxyWriterWithoutCloseNotify: NewProxyWriterWithoutCloseNotify(writer),
-	}
-}
-
-type SimpleProxyWriter struct {
-	*ProxyWriterWithoutCloseNotify
-}
-
-func (p *ProxyWriterWithoutCloseNotify) GetWriter() http.ResponseWriter {
-	return p.W
-}
-
-func (p *ProxyWriterWithoutCloseNotify) StatusCode() int {
-	if p.Code == 0 {
+func (p *ProxyWriter) StatusCode() int {
+	if p.code == 0 {
 		// per contract standard lib will set this to http.StatusOK if not set
 		// by user, here we avoid the confusion by mirroring this logic
 		return http.StatusOK
 	}
-	return p.Code
+	return p.code
 }
 
-func (p *ProxyWriterWithoutCloseNotify) Header() http.Header {
+func (p *ProxyWriter) GetLength() int64 {
+	return p.length
+}
+
+func (p *ProxyWriter) Header() http.Header {
 	return p.W.Header()
 }
 
-func (p *ProxyWriterWithoutCloseNotify) Write(buf []byte) (int, error) {
-	p.Length = p.Length + int64(len(buf))
+func (p *ProxyWriter) Write(buf []byte) (int, error) {
+	p.length = p.length + int64(len(buf))
 	return p.W.Write(buf)
 }
 
-func (p *ProxyWriterWithoutCloseNotify) WriteHeader(code int) {
-	p.Code = code
+func (p *ProxyWriter) WriteHeader(code int) {
+	p.code = code
 	p.W.WriteHeader(code)
 }
 
-func (p *ProxyWriterWithoutCloseNotify) Flush() {
+func (p *ProxyWriter) Flush() {
 	if f, ok := p.W.(http.Flusher); ok {
 		f.Flush()
 	}
 }
 
-func (p *ProxyWriterWithoutCloseNotify) GetLength() int64 {
-	return p.Length
-}
-
-func (p *SimpleProxyWriter) CloseNotify() <-chan bool {
-	if cn, ok := p.GetWriter().(http.CloseNotifier); ok {
+func (p *ProxyWriter) CloseNotify() <-chan bool {
+	if cn, ok := p.W.(http.CloseNotifier); ok {
 		return cn.CloseNotify()
 	}
-	log.Warningf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. Returning dummy channel.", reflect.TypeOf(p.GetWriter()))
+	log.Debugf("Upstream ResponseWriter of type %v does not implement http.CloseNotifier. Returning dummy channel.", reflect.TypeOf(p.W))
 	return make(<-chan bool)
 }
 
-func (p *ProxyWriterWithoutCloseNotify) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+func (p *ProxyWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	if hi, ok := p.W.(http.Hijacker); ok {
 		return hi.Hijack()
 	}
-	log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W))
-	return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W))
+	log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(p.W))
+	return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(p.W))
 }
 
 func NewBufferWriter(w io.WriteCloser) *BufferWriter {

@@ -139,8 +115,8 @@ func (b *BufferWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
 	if hi, ok := b.W.(http.Hijacker); ok {
 		return hi.Hijack()
 	}
-	log.Warningf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W))
-	return nil, nil, fmt.Errorf("The response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W))
+	log.Debugf("Upstream ResponseWriter of type %v does not implement http.Hijacker. Returning dummy channel.", reflect.TypeOf(b.W))
+	return nil, nil, fmt.Errorf("the response writer that was wrapped in this proxy, does not implement http.Hijacker. It is of type: %v", reflect.TypeOf(b.W))
 }
 
 type nopWriteCloser struct {