Merge 'v1.4.0-rc4' into master
This commit is contained in:
commit
cf508b6d48
54 changed files with 2903 additions and 541 deletions
22
CHANGELOG.md
22
CHANGELOG.md
|
@ -1,5 +1,27 @@
|
||||||
# Change Log
|
# Change Log
|
||||||
|
|
||||||
|
## [v1.4.0-rc4](https://github.com/containous/traefik/tree/v1.4.0-rc4) (2017-10-02)
|
||||||
|
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc3...v1.4.0-rc4)
|
||||||
|
|
||||||
|
**Bug fixes:**
|
||||||
|
- **[cluster,kv]** Be certain to clear our marshalled representation before reloading it ([#2165](https://github.com/containous/traefik/pull/2165) by [gozer](https://github.com/gozer))
|
||||||
|
- **[consulcatalog]** Consul catalog failed to remove service ([#2157](https://github.com/containous/traefik/pull/2157) by [Juliens](https://github.com/Juliens))
|
||||||
|
- **[consulcatalog]** Flaky tests and refresh problem in consul catalog ([#2148](https://github.com/containous/traefik/pull/2148) by [Juliens](https://github.com/Juliens))
|
||||||
|
- **[ecs]** Handle empty ECS Clusters properly ([#2170](https://github.com/containous/traefik/pull/2170) by [jeffreykoetsier](https://github.com/jeffreykoetsier))
|
||||||
|
- **[middleware]** Fix SSE subscriptions when retries are enabled ([#2145](https://github.com/containous/traefik/pull/2145) by [marco-jantke](https://github.com/marco-jantke))
|
||||||
|
- **[websocket]** Forward upgrade error from backend ([#2187](https://github.com/containous/traefik/pull/2187) by [Juliens](https://github.com/Juliens))
|
||||||
|
- `bug` command. ([#2178](https://github.com/containous/traefik/pull/2178) by [ldez](https://github.com/ldez))
|
||||||
|
- Fix deprecated IdleTimeout config ([#2143](https://github.com/containous/traefik/pull/2143) by [marco-jantke](https://github.com/marco-jantke))
|
||||||
|
|
||||||
|
**Documentation:**
|
||||||
|
- **[docker]** Updating Docker output and curl for sticky sessions ([#2150](https://github.com/containous/traefik/pull/2150) by [jtyr](https://github.com/jtyr))
|
||||||
|
- **[middleware]** Improve compression documentation ([#2184](https://github.com/containous/traefik/pull/2184) by [errm](https://github.com/errm))
|
||||||
|
- Fix grammar mistake in the kv-config docs ([#2197](https://github.com/containous/traefik/pull/2197) by [chr4](https://github.com/chr4))
|
||||||
|
- Update gRPC example ([#2191](https://github.com/containous/traefik/pull/2191) by [jsenon](https://github.com/jsenon))
|
||||||
|
|
||||||
|
**Misc:**
|
||||||
|
- **[websocket]** Add tests for urlencoded part in url ([#2199](https://github.com/containous/traefik/pull/2199) by [Juliens](https://github.com/Juliens))
|
||||||
|
|
||||||
## [v1.4.0-rc3](https://github.com/containous/traefik/tree/v1.4.0-rc3) (2017-09-18)
|
## [v1.4.0-rc3](https://github.com/containous/traefik/tree/v1.4.0-rc3) (2017-09-18)
|
||||||
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc2...v1.4.0-rc3)
|
[All Commits](https://github.com/containous/traefik/compare/v1.4.0-rc2...v1.4.0-rc3)
|
||||||
|
|
||||||
|
|
2
Makefile
2
Makefile
|
@ -114,7 +114,7 @@ fmt:
|
||||||
gofmt -s -l -w $(SRCS)
|
gofmt -s -l -w $(SRCS)
|
||||||
|
|
||||||
pull-images:
|
pull-images:
|
||||||
cat ./integration/resources/compose/*.yml | grep -E '^\s+image:' | awk '{print $$2}' | sort | uniq | xargs -n 1 docker pull
|
grep --no-filename -E '^\s+image:' ./integration/resources/compose/*.yml | awk '{print $$2}' | sort | uniq | xargs -P 6 -n 1 docker pull
|
||||||
|
|
||||||
help: ## this help
|
help: ## this help
|
||||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
|
||||||
|
|
|
@ -199,6 +199,10 @@ func (d *Datastore) get() *Metadata {
|
||||||
func (d *Datastore) Load() (Object, error) {
|
func (d *Datastore) Load() (Object, error) {
|
||||||
d.localLock.Lock()
|
d.localLock.Lock()
|
||||||
defer d.localLock.Unlock()
|
defer d.localLock.Unlock()
|
||||||
|
|
||||||
|
// clear Object first, as mapstructure's decoder doesn't have ZeroFields set to true for merging purposes
|
||||||
|
d.meta.Object = d.meta.Object[:0]
|
||||||
|
|
||||||
err := d.kv.LoadConfig(d.meta)
|
err := d.kv.LoadConfig(d.meta)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|
136
cmd/traefik/anonymize/anonymize.go
Normal file
136
cmd/traefik/anonymize/anonymize.go
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
package anonymize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"regexp"
|
||||||
|
|
||||||
|
"github.com/mitchellh/copystructure"
|
||||||
|
"github.com/mvdan/xurls"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
maskShort = "xxxx"
|
||||||
|
maskLarge = maskShort + maskShort + maskShort + maskShort + maskShort + maskShort + maskShort + maskShort
|
||||||
|
)
|
||||||
|
|
||||||
|
// Do configuration.
|
||||||
|
func Do(baseConfig interface{}, indent bool) (string, error) {
|
||||||
|
anomConfig, err := copystructure.Copy(baseConfig)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
val := reflect.ValueOf(anomConfig)
|
||||||
|
|
||||||
|
err = doOnStruct(val)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
configJSON, err := marshal(anomConfig, indent)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return doOnJSON(string(configJSON)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func doOnJSON(input string) string {
|
||||||
|
mailExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
|
||||||
|
return xurls.Relaxed.ReplaceAllString(mailExp.ReplaceAllString(input, maskLarge+"\""), maskLarge)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doOnStruct(field reflect.Value) error {
|
||||||
|
switch field.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if !field.IsNil() {
|
||||||
|
if err := doOnStruct(field.Elem()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
for i := 0; i < field.NumField(); i++ {
|
||||||
|
fld := field.Field(i)
|
||||||
|
stField := field.Type().Field(i)
|
||||||
|
if !isExported(stField) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if stField.Tag.Get("export") == "true" {
|
||||||
|
if err := doOnStruct(fld); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err := reset(fld, stField.Name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
for _, key := range field.MapKeys() {
|
||||||
|
if err := doOnStruct(field.MapIndex(key)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
for j := 0; j < field.Len(); j++ {
|
||||||
|
if err := doOnStruct(field.Index(j)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func reset(field reflect.Value, name string) error {
|
||||||
|
if !field.CanSet() {
|
||||||
|
return fmt.Errorf("cannot reset field %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch field.Kind() {
|
||||||
|
case reflect.Ptr:
|
||||||
|
if !field.IsNil() {
|
||||||
|
field.Set(reflect.Zero(field.Type()))
|
||||||
|
}
|
||||||
|
case reflect.Struct:
|
||||||
|
if field.IsValid() {
|
||||||
|
field.Set(reflect.Zero(field.Type()))
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if field.String() != "" {
|
||||||
|
field.Set(reflect.ValueOf(maskShort))
|
||||||
|
}
|
||||||
|
case reflect.Map:
|
||||||
|
if field.Len() > 0 {
|
||||||
|
field.Set(reflect.MakeMap(field.Type()))
|
||||||
|
}
|
||||||
|
case reflect.Slice:
|
||||||
|
if field.Len() > 0 {
|
||||||
|
field.Set(reflect.MakeSlice(field.Type(), 0, 0))
|
||||||
|
}
|
||||||
|
case reflect.Interface:
|
||||||
|
if !field.IsNil() {
|
||||||
|
return reset(field.Elem(), "")
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
// Primitive type
|
||||||
|
field.Set(reflect.Zero(field.Type()))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isExported return true is a struct field is exported, else false
|
||||||
|
func isExported(f reflect.StructField) bool {
|
||||||
|
if f.PkgPath != "" && !f.Anonymous {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func marshal(anomConfig interface{}, indent bool) ([]byte, error) {
|
||||||
|
if indent {
|
||||||
|
return json.MarshalIndent(anomConfig, "", " ")
|
||||||
|
}
|
||||||
|
return json.Marshal(anomConfig)
|
||||||
|
}
|
666
cmd/traefik/anonymize/anonymize_config_test.go
Normal file
666
cmd/traefik/anonymize/anonymize_config_test.go
Normal file
|
@ -0,0 +1,666 @@
|
||||||
|
package anonymize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/containous/flaeg"
|
||||||
|
"github.com/containous/traefik/acme"
|
||||||
|
"github.com/containous/traefik/configuration"
|
||||||
|
"github.com/containous/traefik/middlewares"
|
||||||
|
"github.com/containous/traefik/provider"
|
||||||
|
"github.com/containous/traefik/provider/boltdb"
|
||||||
|
"github.com/containous/traefik/provider/consul"
|
||||||
|
"github.com/containous/traefik/provider/docker"
|
||||||
|
"github.com/containous/traefik/provider/dynamodb"
|
||||||
|
"github.com/containous/traefik/provider/ecs"
|
||||||
|
"github.com/containous/traefik/provider/etcd"
|
||||||
|
"github.com/containous/traefik/provider/eureka"
|
||||||
|
"github.com/containous/traefik/provider/file"
|
||||||
|
"github.com/containous/traefik/provider/kubernetes"
|
||||||
|
"github.com/containous/traefik/provider/kv"
|
||||||
|
"github.com/containous/traefik/provider/marathon"
|
||||||
|
"github.com/containous/traefik/provider/mesos"
|
||||||
|
"github.com/containous/traefik/provider/rancher"
|
||||||
|
"github.com/containous/traefik/provider/web"
|
||||||
|
"github.com/containous/traefik/provider/zk"
|
||||||
|
"github.com/containous/traefik/safe"
|
||||||
|
"github.com/containous/traefik/types"
|
||||||
|
thoas_stats "github.com/thoas/stats"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestDo_globalConfiguration(t *testing.T) {
|
||||||
|
|
||||||
|
config := &configuration.GlobalConfiguration{}
|
||||||
|
|
||||||
|
config.GraceTimeOut = flaeg.Duration(666 * time.Second)
|
||||||
|
config.Debug = true
|
||||||
|
config.CheckNewVersion = true
|
||||||
|
config.AccessLogsFile = "AccessLogsFile"
|
||||||
|
config.AccessLog = &types.AccessLog{
|
||||||
|
FilePath: "AccessLog FilePath",
|
||||||
|
Format: "AccessLog Format",
|
||||||
|
}
|
||||||
|
config.TraefikLogsFile = "TraefikLogsFile"
|
||||||
|
config.LogLevel = "LogLevel"
|
||||||
|
config.EntryPoints = configuration.EntryPoints{
|
||||||
|
"foo": {
|
||||||
|
Network: "foo Network",
|
||||||
|
Address: "foo Address",
|
||||||
|
TLS: &configuration.TLS{
|
||||||
|
MinVersion: "foo MinVersion",
|
||||||
|
CipherSuites: []string{"foo CipherSuites 1", "foo CipherSuites 2", "foo CipherSuites 3"},
|
||||||
|
Certificates: configuration.Certificates{
|
||||||
|
{CertFile: "CertFile 1", KeyFile: "KeyFile 1"},
|
||||||
|
{CertFile: "CertFile 2", KeyFile: "KeyFile 2"},
|
||||||
|
},
|
||||||
|
ClientCAFiles: []string{"foo ClientCAFiles 1", "foo ClientCAFiles 2", "foo ClientCAFiles 3"},
|
||||||
|
},
|
||||||
|
Redirect: &configuration.Redirect{
|
||||||
|
Replacement: "foo Replacement",
|
||||||
|
Regex: "foo Regex",
|
||||||
|
EntryPoint: "foo EntryPoint",
|
||||||
|
},
|
||||||
|
Auth: &types.Auth{
|
||||||
|
Basic: &types.Basic{
|
||||||
|
UsersFile: "foo Basic UsersFile",
|
||||||
|
Users: types.Users{"foo Basic Users 1", "foo Basic Users 2", "foo Basic Users 3"},
|
||||||
|
},
|
||||||
|
Digest: &types.Digest{
|
||||||
|
UsersFile: "foo Digest UsersFile",
|
||||||
|
Users: types.Users{"foo Digest Users 1", "foo Digest Users 2", "foo Digest Users 3"},
|
||||||
|
},
|
||||||
|
Forward: &types.Forward{
|
||||||
|
Address: "foo Address",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "foo CA",
|
||||||
|
Cert: "foo Cert",
|
||||||
|
Key: "foo Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
TrustForwardHeader: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
WhitelistSourceRange: []string{"foo WhitelistSourceRange 1", "foo WhitelistSourceRange 2", "foo WhitelistSourceRange 3"},
|
||||||
|
Compress: true,
|
||||||
|
ProxyProtocol: true,
|
||||||
|
},
|
||||||
|
"fii": {
|
||||||
|
Network: "fii Network",
|
||||||
|
Address: "fii Address",
|
||||||
|
TLS: &configuration.TLS{
|
||||||
|
MinVersion: "fii MinVersion",
|
||||||
|
CipherSuites: []string{"fii CipherSuites 1", "fii CipherSuites 2", "fii CipherSuites 3"},
|
||||||
|
Certificates: configuration.Certificates{
|
||||||
|
{CertFile: "CertFile 1", KeyFile: "KeyFile 1"},
|
||||||
|
{CertFile: "CertFile 2", KeyFile: "KeyFile 2"},
|
||||||
|
},
|
||||||
|
ClientCAFiles: []string{"fii ClientCAFiles 1", "fii ClientCAFiles 2", "fii ClientCAFiles 3"},
|
||||||
|
},
|
||||||
|
Redirect: &configuration.Redirect{
|
||||||
|
Replacement: "fii Replacement",
|
||||||
|
Regex: "fii Regex",
|
||||||
|
EntryPoint: "fii EntryPoint",
|
||||||
|
},
|
||||||
|
Auth: &types.Auth{
|
||||||
|
Basic: &types.Basic{
|
||||||
|
UsersFile: "fii Basic UsersFile",
|
||||||
|
Users: types.Users{"fii Basic Users 1", "fii Basic Users 2", "fii Basic Users 3"},
|
||||||
|
},
|
||||||
|
Digest: &types.Digest{
|
||||||
|
UsersFile: "fii Digest UsersFile",
|
||||||
|
Users: types.Users{"fii Digest Users 1", "fii Digest Users 2", "fii Digest Users 3"},
|
||||||
|
},
|
||||||
|
Forward: &types.Forward{
|
||||||
|
Address: "fii Address",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "fii CA",
|
||||||
|
Cert: "fii Cert",
|
||||||
|
Key: "fii Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
TrustForwardHeader: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
WhitelistSourceRange: []string{"fii WhitelistSourceRange 1", "fii WhitelistSourceRange 2", "fii WhitelistSourceRange 3"},
|
||||||
|
Compress: true,
|
||||||
|
ProxyProtocol: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Cluster = &types.Cluster{
|
||||||
|
Node: "Cluster Node",
|
||||||
|
Store: &types.Store{
|
||||||
|
Prefix: "Cluster Store Prefix",
|
||||||
|
// ...
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Constraints = types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "Constraints Key 1",
|
||||||
|
Regex: "Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "Constraints Key 1",
|
||||||
|
Regex: "Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.ACME = &acme.ACME{
|
||||||
|
Email: "acme Email",
|
||||||
|
Domains: []acme.Domain{
|
||||||
|
{
|
||||||
|
Main: "Domains Main",
|
||||||
|
SANs: []string{"Domains acme SANs 1", "Domains acme SANs 2", "Domains acme SANs 3"},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Storage: "Storage",
|
||||||
|
StorageFile: "StorageFile",
|
||||||
|
OnDemand: true,
|
||||||
|
OnHostRule: true,
|
||||||
|
CAServer: "CAServer",
|
||||||
|
EntryPoint: "EntryPoint",
|
||||||
|
DNSProvider: "DNSProvider",
|
||||||
|
DelayDontCheckDNS: 666,
|
||||||
|
ACMELogging: true,
|
||||||
|
TLSConfig: &tls.Config{
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
// ...
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.DefaultEntryPoints = configuration.DefaultEntryPoints{"DefaultEntryPoints 1", "DefaultEntryPoints 2", "DefaultEntryPoints 3"}
|
||||||
|
config.ProvidersThrottleDuration = flaeg.Duration(666 * time.Second)
|
||||||
|
config.MaxIdleConnsPerHost = 666
|
||||||
|
config.IdleTimeout = flaeg.Duration(666 * time.Second)
|
||||||
|
config.InsecureSkipVerify = true
|
||||||
|
config.RootCAs = configuration.RootCAs{"RootCAs 1", "RootCAs 2", "RootCAs 3"}
|
||||||
|
config.Retry = &configuration.Retry{
|
||||||
|
Attempts: 666,
|
||||||
|
}
|
||||||
|
config.HealthCheck = &configuration.HealthCheckConfig{
|
||||||
|
Interval: flaeg.Duration(666 * time.Second),
|
||||||
|
}
|
||||||
|
config.RespondingTimeouts = &configuration.RespondingTimeouts{
|
||||||
|
ReadTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
WriteTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
IdleTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
}
|
||||||
|
config.ForwardingTimeouts = &configuration.ForwardingTimeouts{
|
||||||
|
DialTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
ResponseHeaderTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
}
|
||||||
|
config.Docker = &docker.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "docker Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "docker Constraints Key 1",
|
||||||
|
Regex: "docker Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "docker Constraints Key 1",
|
||||||
|
Regex: "docker Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "docker Endpoint",
|
||||||
|
Domain: "docker Domain",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "docker CA",
|
||||||
|
Cert: "docker Cert",
|
||||||
|
Key: "docker Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
ExposedByDefault: true,
|
||||||
|
UseBindPortIP: true,
|
||||||
|
SwarmMode: true,
|
||||||
|
}
|
||||||
|
config.File = &file.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "file Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "file Constraints Key 1",
|
||||||
|
Regex: "file Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "file Constraints Key 1",
|
||||||
|
Regex: "file Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Directory: "file Directory",
|
||||||
|
}
|
||||||
|
config.Web = &web.Provider{
|
||||||
|
Address: "web Address",
|
||||||
|
CertFile: "web CertFile",
|
||||||
|
KeyFile: "web KeyFile",
|
||||||
|
ReadOnly: true,
|
||||||
|
Statistics: &types.Statistics{
|
||||||
|
RecentErrors: 666,
|
||||||
|
},
|
||||||
|
Metrics: &types.Metrics{
|
||||||
|
Prometheus: &types.Prometheus{
|
||||||
|
Buckets: types.Buckets{6.5, 6.6, 6.7},
|
||||||
|
},
|
||||||
|
Datadog: &types.Datadog{
|
||||||
|
Address: "Datadog Address",
|
||||||
|
PushInterval: "Datadog PushInterval",
|
||||||
|
},
|
||||||
|
StatsD: &types.Statsd{
|
||||||
|
Address: "StatsD Address",
|
||||||
|
PushInterval: "StatsD PushInterval",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Path: "web Path",
|
||||||
|
Auth: &types.Auth{
|
||||||
|
Basic: &types.Basic{
|
||||||
|
UsersFile: "web Basic UsersFile",
|
||||||
|
Users: types.Users{"web Basic Users 1", "web Basic Users 2", "web Basic Users 3"},
|
||||||
|
},
|
||||||
|
Digest: &types.Digest{
|
||||||
|
UsersFile: "web Digest UsersFile",
|
||||||
|
Users: types.Users{"web Digest Users 1", "web Digest Users 2", "web Digest Users 3"},
|
||||||
|
},
|
||||||
|
Forward: &types.Forward{
|
||||||
|
Address: "web Address",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "web CA",
|
||||||
|
Cert: "web Cert",
|
||||||
|
Key: "web Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
TrustForwardHeader: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Debug: true,
|
||||||
|
CurrentConfigurations: &safe.Safe{},
|
||||||
|
Stats: &thoas_stats.Stats{
|
||||||
|
Uptime: time.Now(),
|
||||||
|
Pid: 666,
|
||||||
|
ResponseCounts: map[string]int{"foo": 1, "fii": 2, "fuu": 3},
|
||||||
|
TotalResponseCounts: map[string]int{"foo": 1, "fii": 2, "fuu": 3},
|
||||||
|
TotalResponseTime: time.Now(),
|
||||||
|
},
|
||||||
|
StatsRecorder: &middlewares.StatsRecorder{},
|
||||||
|
}
|
||||||
|
config.Marathon = &marathon.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "marathon Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "marathon Constraints Key 1",
|
||||||
|
Regex: "marathon Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "marathon Constraints Key 1",
|
||||||
|
Regex: "marathon Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "",
|
||||||
|
Domain: "",
|
||||||
|
ExposedByDefault: true,
|
||||||
|
GroupsAsSubDomains: true,
|
||||||
|
DCOSToken: "",
|
||||||
|
MarathonLBCompatibility: true,
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "marathon CA",
|
||||||
|
Cert: "marathon Cert",
|
||||||
|
Key: "marathon Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
DialerTimeout: flaeg.Duration(666 * time.Second),
|
||||||
|
KeepAlive: flaeg.Duration(666 * time.Second),
|
||||||
|
ForceTaskHostname: true,
|
||||||
|
Basic: &marathon.Basic{
|
||||||
|
HTTPBasicAuthUser: "marathon HTTPBasicAuthUser",
|
||||||
|
HTTPBasicPassword: "marathon HTTPBasicPassword",
|
||||||
|
},
|
||||||
|
RespectReadinessChecks: true,
|
||||||
|
}
|
||||||
|
config.ConsulCatalog = &consul.CatalogProvider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "ConsulCatalog Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "ConsulCatalog Constraints Key 1",
|
||||||
|
Regex: "ConsulCatalog Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "ConsulCatalog Constraints Key 1",
|
||||||
|
Regex: "ConsulCatalog Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "ConsulCatalog Endpoint",
|
||||||
|
Domain: "ConsulCatalog Domain",
|
||||||
|
ExposedByDefault: true,
|
||||||
|
Prefix: "ConsulCatalog Prefix",
|
||||||
|
FrontEndRule: "ConsulCatalog FrontEndRule",
|
||||||
|
}
|
||||||
|
config.Kubernetes = &kubernetes.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "k8s Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "k8s Constraints Key 1",
|
||||||
|
Regex: "k8s Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "k8s Constraints Key 1",
|
||||||
|
Regex: "k8s Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "k8s Endpoint",
|
||||||
|
Token: "k8s Token",
|
||||||
|
CertAuthFilePath: "k8s CertAuthFilePath",
|
||||||
|
DisablePassHostHeaders: true,
|
||||||
|
Namespaces: kubernetes.Namespaces{"k8s Namespaces 1", "k8s Namespaces 2", "k8s Namespaces 3"},
|
||||||
|
LabelSelector: "k8s LabelSelector",
|
||||||
|
}
|
||||||
|
config.Mesos = &mesos.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "mesos Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "mesos Constraints Key 1",
|
||||||
|
Regex: "mesos Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "mesos Constraints Key 1",
|
||||||
|
Regex: "mesos Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "mesos Endpoint",
|
||||||
|
Domain: "mesos Domain",
|
||||||
|
ExposedByDefault: true,
|
||||||
|
GroupsAsSubDomains: true,
|
||||||
|
ZkDetectionTimeout: 666,
|
||||||
|
RefreshSeconds: 666,
|
||||||
|
IPSources: "mesos IPSources",
|
||||||
|
StateTimeoutSecond: 666,
|
||||||
|
Masters: []string{"mesos Masters 1", "mesos Masters 2", "mesos Masters 3"},
|
||||||
|
}
|
||||||
|
config.Eureka = &eureka.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "eureka Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "eureka Constraints Key 1",
|
||||||
|
Regex: "eureka Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "eureka Constraints Key 1",
|
||||||
|
Regex: "eureka Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "eureka Endpoint",
|
||||||
|
Delay: "eureka Delay",
|
||||||
|
}
|
||||||
|
config.ECS = &ecs.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "ecs Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "ecs Constraints Key 1",
|
||||||
|
Regex: "ecs Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "ecs Constraints Key 1",
|
||||||
|
Regex: "ecs Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Domain: "ecs Domain",
|
||||||
|
ExposedByDefault: true,
|
||||||
|
RefreshSeconds: 666,
|
||||||
|
Clusters: ecs.Clusters{"ecs Clusters 1", "ecs Clusters 2", "ecs Clusters 3"},
|
||||||
|
Cluster: "ecs Cluster",
|
||||||
|
AutoDiscoverClusters: true,
|
||||||
|
Region: "ecs Region",
|
||||||
|
AccessKeyID: "ecs AccessKeyID",
|
||||||
|
SecretAccessKey: "ecs SecretAccessKey",
|
||||||
|
}
|
||||||
|
config.Rancher = &rancher.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "rancher Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "rancher Constraints Key 1",
|
||||||
|
Regex: "rancher Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "rancher Constraints Key 1",
|
||||||
|
Regex: "rancher Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
APIConfiguration: rancher.APIConfiguration{
|
||||||
|
Endpoint: "rancher Endpoint",
|
||||||
|
AccessKey: "rancher AccessKey",
|
||||||
|
SecretKey: "rancher SecretKey",
|
||||||
|
},
|
||||||
|
API: &rancher.APIConfiguration{
|
||||||
|
Endpoint: "rancher Endpoint",
|
||||||
|
AccessKey: "rancher AccessKey",
|
||||||
|
SecretKey: "rancher SecretKey",
|
||||||
|
},
|
||||||
|
Metadata: &rancher.MetadataConfiguration{
|
||||||
|
IntervalPoll: true,
|
||||||
|
Prefix: "rancher Metadata Prefix",
|
||||||
|
},
|
||||||
|
Domain: "rancher Domain",
|
||||||
|
RefreshSeconds: 666,
|
||||||
|
ExposedByDefault: true,
|
||||||
|
EnableServiceHealthFilter: true,
|
||||||
|
}
|
||||||
|
config.DynamoDB = &dynamodb.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "dynamodb Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "dynamodb Constraints Key 1",
|
||||||
|
Regex: "dynamodb Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "dynamodb Constraints Key 1",
|
||||||
|
Regex: "dynamodb Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
AccessKeyID: "dynamodb AccessKeyID",
|
||||||
|
RefreshSeconds: 666,
|
||||||
|
Region: "dynamodb Region",
|
||||||
|
SecretAccessKey: "dynamodb SecretAccessKey",
|
||||||
|
TableName: "dynamodb TableName",
|
||||||
|
Endpoint: "dynamodb Endpoint",
|
||||||
|
}
|
||||||
|
config.Etcd = &etcd.Provider{
|
||||||
|
Provider: kv.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "etcd Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "etcd Constraints Key 1",
|
||||||
|
Regex: "etcd Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "etcd Constraints Key 1",
|
||||||
|
Regex: "etcd Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "etcd Endpoint",
|
||||||
|
Prefix: "etcd Prefix",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "etcd CA",
|
||||||
|
Cert: "etcd Cert",
|
||||||
|
Key: "etcd Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
Username: "etcd Username",
|
||||||
|
Password: "etcd Password",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Zookeeper = &zk.Provider{
|
||||||
|
Provider: kv.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "zk Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "zk Constraints Key 1",
|
||||||
|
Regex: "zk Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "zk Constraints Key 1",
|
||||||
|
Regex: "zk Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "zk Endpoint",
|
||||||
|
Prefix: "zk Prefix",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "zk CA",
|
||||||
|
Cert: "zk Cert",
|
||||||
|
Key: "zk Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
Username: "zk Username",
|
||||||
|
Password: "zk Password",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Boltdb = &boltdb.Provider{
|
||||||
|
Provider: kv.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "boltdb Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "boltdb Constraints Key 1",
|
||||||
|
Regex: "boltdb Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "boltdb Constraints Key 1",
|
||||||
|
Regex: "boltdb Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "boltdb Endpoint",
|
||||||
|
Prefix: "boltdb Prefix",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "boltdb CA",
|
||||||
|
Cert: "boltdb Cert",
|
||||||
|
Key: "boltdb Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
Username: "boltdb Username",
|
||||||
|
Password: "boltdb Password",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
config.Consul = &consul.Provider{
|
||||||
|
Provider: kv.Provider{
|
||||||
|
BaseProvider: provider.BaseProvider{
|
||||||
|
Watch: true,
|
||||||
|
Filename: "consul Filename",
|
||||||
|
Constraints: types.Constraints{
|
||||||
|
{
|
||||||
|
Key: "consul Constraints Key 1",
|
||||||
|
Regex: "consul Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Key: "consul Constraints Key 1",
|
||||||
|
Regex: "consul Constraints Regex 2",
|
||||||
|
MustMatch: true,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Trace: true,
|
||||||
|
DebugLogGeneratedTemplate: true,
|
||||||
|
},
|
||||||
|
Endpoint: "consul Endpoint",
|
||||||
|
Prefix: "consul Prefix",
|
||||||
|
TLS: &types.ClientTLS{
|
||||||
|
CA: "consul CA",
|
||||||
|
Cert: "consul Cert",
|
||||||
|
Key: "consul Key",
|
||||||
|
InsecureSkipVerify: true,
|
||||||
|
},
|
||||||
|
Username: "consul Username",
|
||||||
|
Password: "consul Password",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanJSON, err := Do(config, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err, cleanJSON)
|
||||||
|
}
|
||||||
|
}
|
238
cmd/traefik/anonymize/anonymize_doOnJSON_test.go
Normal file
238
cmd/traefik/anonymize/anonymize_doOnJSON_test.go
Normal file
|
@ -0,0 +1,238 @@
|
||||||
|
package anonymize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_doOnJSON(t *testing.T) {
|
||||||
|
baseConfiguration := `
|
||||||
|
{
|
||||||
|
"GraceTimeOut": 10000000000,
|
||||||
|
"Debug": false,
|
||||||
|
"CheckNewVersion": true,
|
||||||
|
"AccessLogsFile": "",
|
||||||
|
"TraefikLogsFile": "",
|
||||||
|
"LogLevel": "ERROR",
|
||||||
|
"EntryPoints": {
|
||||||
|
"http": {
|
||||||
|
"Network": "",
|
||||||
|
"Address": ":80",
|
||||||
|
"TLS": null,
|
||||||
|
"Redirect": {
|
||||||
|
"EntryPoint": "https",
|
||||||
|
"Regex": "",
|
||||||
|
"Replacement": ""
|
||||||
|
},
|
||||||
|
"Auth": null,
|
||||||
|
"Compress": false
|
||||||
|
},
|
||||||
|
"https": {
|
||||||
|
"Network": "",
|
||||||
|
"Address": ":443",
|
||||||
|
"TLS": {
|
||||||
|
"MinVersion": "",
|
||||||
|
"CipherSuites": null,
|
||||||
|
"Certificates": null,
|
||||||
|
"ClientCAFiles": null
|
||||||
|
},
|
||||||
|
"Redirect": null,
|
||||||
|
"Auth": null,
|
||||||
|
"Compress": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Cluster": null,
|
||||||
|
"Constraints": [],
|
||||||
|
"ACME": {
|
||||||
|
"Email": "foo@bar.com",
|
||||||
|
"Domains": [
|
||||||
|
{
|
||||||
|
"Main": "foo@bar.com",
|
||||||
|
"SANs": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Main": "foo@bar.com",
|
||||||
|
"SANs": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Storage": "",
|
||||||
|
"StorageFile": "/acme/acme.json",
|
||||||
|
"OnDemand": true,
|
||||||
|
"OnHostRule": true,
|
||||||
|
"CAServer": "",
|
||||||
|
"EntryPoint": "https",
|
||||||
|
"DNSProvider": "",
|
||||||
|
"DelayDontCheckDNS": 0,
|
||||||
|
"ACMELogging": false,
|
||||||
|
"TLSConfig": null
|
||||||
|
},
|
||||||
|
"DefaultEntryPoints": [
|
||||||
|
"https",
|
||||||
|
"http"
|
||||||
|
],
|
||||||
|
"ProvidersThrottleDuration": 2000000000,
|
||||||
|
"MaxIdleConnsPerHost": 200,
|
||||||
|
"IdleTimeout": 180000000000,
|
||||||
|
"InsecureSkipVerify": false,
|
||||||
|
"Retry": null,
|
||||||
|
"HealthCheck": {
|
||||||
|
"Interval": 30000000000
|
||||||
|
},
|
||||||
|
"Docker": null,
|
||||||
|
"File": null,
|
||||||
|
"Web": null,
|
||||||
|
"Marathon": null,
|
||||||
|
"Consul": null,
|
||||||
|
"ConsulCatalog": null,
|
||||||
|
"Etcd": null,
|
||||||
|
"Zookeeper": null,
|
||||||
|
"Boltdb": null,
|
||||||
|
"Kubernetes": null,
|
||||||
|
"Mesos": null,
|
||||||
|
"Eureka": null,
|
||||||
|
"ECS": null,
|
||||||
|
"Rancher": null,
|
||||||
|
"DynamoDB": null,
|
||||||
|
"ConfigFile": "/etc/traefik/traefik.toml"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
expectedConfiguration := `
|
||||||
|
{
|
||||||
|
"GraceTimeOut": 10000000000,
|
||||||
|
"Debug": false,
|
||||||
|
"CheckNewVersion": true,
|
||||||
|
"AccessLogsFile": "",
|
||||||
|
"TraefikLogsFile": "",
|
||||||
|
"LogLevel": "ERROR",
|
||||||
|
"EntryPoints": {
|
||||||
|
"http": {
|
||||||
|
"Network": "",
|
||||||
|
"Address": ":80",
|
||||||
|
"TLS": null,
|
||||||
|
"Redirect": {
|
||||||
|
"EntryPoint": "https",
|
||||||
|
"Regex": "",
|
||||||
|
"Replacement": ""
|
||||||
|
},
|
||||||
|
"Auth": null,
|
||||||
|
"Compress": false
|
||||||
|
},
|
||||||
|
"https": {
|
||||||
|
"Network": "",
|
||||||
|
"Address": ":443",
|
||||||
|
"TLS": {
|
||||||
|
"MinVersion": "",
|
||||||
|
"CipherSuites": null,
|
||||||
|
"Certificates": null,
|
||||||
|
"ClientCAFiles": null
|
||||||
|
},
|
||||||
|
"Redirect": null,
|
||||||
|
"Auth": null,
|
||||||
|
"Compress": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Cluster": null,
|
||||||
|
"Constraints": [],
|
||||||
|
"ACME": {
|
||||||
|
"Email": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||||
|
"Domains": [
|
||||||
|
{
|
||||||
|
"Main": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||||
|
"SANs": null
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Main": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||||
|
"SANs": null
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"Storage": "",
|
||||||
|
"StorageFile": "/acme/acme.json",
|
||||||
|
"OnDemand": true,
|
||||||
|
"OnHostRule": true,
|
||||||
|
"CAServer": "",
|
||||||
|
"EntryPoint": "https",
|
||||||
|
"DNSProvider": "",
|
||||||
|
"DelayDontCheckDNS": 0,
|
||||||
|
"ACMELogging": false,
|
||||||
|
"TLSConfig": null
|
||||||
|
},
|
||||||
|
"DefaultEntryPoints": [
|
||||||
|
"https",
|
||||||
|
"http"
|
||||||
|
],
|
||||||
|
"ProvidersThrottleDuration": 2000000000,
|
||||||
|
"MaxIdleConnsPerHost": 200,
|
||||||
|
"IdleTimeout": 180000000000,
|
||||||
|
"InsecureSkipVerify": false,
|
||||||
|
"Retry": null,
|
||||||
|
"HealthCheck": {
|
||||||
|
"Interval": 30000000000
|
||||||
|
},
|
||||||
|
"Docker": null,
|
||||||
|
"File": null,
|
||||||
|
"Web": null,
|
||||||
|
"Marathon": null,
|
||||||
|
"Consul": null,
|
||||||
|
"ConsulCatalog": null,
|
||||||
|
"Etcd": null,
|
||||||
|
"Zookeeper": null,
|
||||||
|
"Boltdb": null,
|
||||||
|
"Kubernetes": null,
|
||||||
|
"Mesos": null,
|
||||||
|
"Eureka": null,
|
||||||
|
"ECS": null,
|
||||||
|
"Rancher": null,
|
||||||
|
"DynamoDB": null,
|
||||||
|
"ConfigFile": "/etc/traefik/traefik.toml"
|
||||||
|
}
|
||||||
|
`
|
||||||
|
anomConfiguration := doOnJSON(baseConfiguration)
|
||||||
|
|
||||||
|
if anomConfiguration != expectedConfiguration {
|
||||||
|
t.Errorf("Got %s, want %s.", anomConfiguration, expectedConfiguration)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_doOnJSON_simple(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
input string
|
||||||
|
expectedOutput string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "email",
|
||||||
|
input: `{
|
||||||
|
"email1": "goo@example.com",
|
||||||
|
"email2": "foo.bargoo@example.com",
|
||||||
|
"email3": "foo.bargoo@example.com.us"
|
||||||
|
}`,
|
||||||
|
expectedOutput: `{
|
||||||
|
"email1": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||||
|
"email2": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
||||||
|
"email3": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||||
|
}`,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "url",
|
||||||
|
input: `{
|
||||||
|
"URL": "foo domain.com foo",
|
||||||
|
"URL": "foo sub.domain.com foo",
|
||||||
|
"URL": "foo sub.sub.domain.com foo",
|
||||||
|
"URL": "foo sub.sub.sub.domain.com.us foo"
|
||||||
|
}`,
|
||||||
|
expectedOutput: `{
|
||||||
|
"URL": "foo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx foo",
|
||||||
|
"URL": "foo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx foo",
|
||||||
|
"URL": "foo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx foo",
|
||||||
|
"URL": "foo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx foo"
|
||||||
|
}`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range testCases {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
output := doOnJSON(test.input)
|
||||||
|
assert.Equal(t, test.expectedOutput, output)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
176
cmd/traefik/anonymize/anonymize_doOnStruct_test.go
Normal file
176
cmd/traefik/anonymize/anonymize_doOnStruct_test.go
Normal file
|
@ -0,0 +1,176 @@
|
||||||
|
package anonymize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Courgette struct {
|
||||||
|
Ji string
|
||||||
|
Ho string
|
||||||
|
}
|
||||||
|
type Tomate struct {
|
||||||
|
Ji string
|
||||||
|
Ho string
|
||||||
|
}
|
||||||
|
|
||||||
|
type Carotte struct {
|
||||||
|
Name string
|
||||||
|
Value int
|
||||||
|
Courgette Courgette
|
||||||
|
ECourgette Courgette `export:"true"`
|
||||||
|
Pourgette *Courgette
|
||||||
|
EPourgette *Courgette `export:"true"`
|
||||||
|
Aubergine map[string]string
|
||||||
|
EAubergine map[string]string `export:"true"`
|
||||||
|
SAubergine map[string]Tomate
|
||||||
|
ESAubergine map[string]Tomate `export:"true"`
|
||||||
|
PSAubergine map[string]*Tomate
|
||||||
|
EPAubergine map[string]*Tomate `export:"true"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_doOnStruct(t *testing.T) {
|
||||||
|
testCase := []struct {
|
||||||
|
name string
|
||||||
|
base *Carotte
|
||||||
|
expected *Carotte
|
||||||
|
hasError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "primitive",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
Value: 666,
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "struct",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
Courgette: Courgette{
|
||||||
|
Ji: "huu",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "pointer",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
Pourgette: &Courgette{
|
||||||
|
Ji: "hoo",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
Pourgette: nil,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "export struct",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
ECourgette: Courgette{
|
||||||
|
Ji: "huu",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
ECourgette: Courgette{
|
||||||
|
Ji: "xxxx",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "export pointer struct",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
ECourgette: Courgette{
|
||||||
|
Ji: "huu",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
ECourgette: Courgette{
|
||||||
|
Ji: "xxxx",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "export map string/string",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
EAubergine: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
EAubergine: map[string]string{
|
||||||
|
"foo": "bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "export map string/pointer",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
EPAubergine: map[string]*Tomate{
|
||||||
|
"foo": {
|
||||||
|
Ji: "fdskljf",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
EPAubergine: map[string]*Tomate{
|
||||||
|
"foo": {
|
||||||
|
Ji: "xxxx",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "export map string/struct (UNSAFE)",
|
||||||
|
base: &Carotte{
|
||||||
|
Name: "koko",
|
||||||
|
ESAubergine: map[string]Tomate{
|
||||||
|
"foo": {
|
||||||
|
Ji: "JiJiJi",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
expected: &Carotte{
|
||||||
|
Name: "xxxx",
|
||||||
|
ESAubergine: map[string]Tomate{
|
||||||
|
"foo": {
|
||||||
|
Ji: "JiJiJi",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
hasError: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, test := range testCase {
|
||||||
|
t.Run(test.name, func(t *testing.T) {
|
||||||
|
val := reflect.ValueOf(test.base).Elem()
|
||||||
|
err := doOnStruct(val)
|
||||||
|
if !test.hasError && err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if test.hasError && err == nil {
|
||||||
|
t.Fatal("Got no error but want an error.")
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.EqualValues(t, test.expected, test.base)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
|
@ -2,20 +2,18 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"regexp"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"text/template"
|
"text/template"
|
||||||
|
|
||||||
"github.com/containous/flaeg"
|
"github.com/containous/flaeg"
|
||||||
"github.com/mvdan/xurls"
|
"github.com/containous/traefik/cmd/traefik/anonymize"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
const (
|
||||||
bugtracker = "https://github.com/containous/traefik/issues/new"
|
bugTracker = "https://github.com/containous/traefik/issues/new"
|
||||||
bugTemplate = `<!--
|
bugTemplate = `<!--
|
||||||
DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS.
|
DO NOT FILE ISSUES FOR GENERAL SUPPORT QUESTIONS.
|
||||||
|
|
||||||
|
@ -94,50 +92,67 @@ func newBugCmd(traefikConfiguration interface{}, traefikPointersConfiguration in
|
||||||
Description: `Report an issue on Traefik bugtracker`,
|
Description: `Report an issue on Traefik bugtracker`,
|
||||||
Config: traefikConfiguration,
|
Config: traefikConfiguration,
|
||||||
DefaultPointersConfig: traefikPointersConfiguration,
|
DefaultPointersConfig: traefikPointersConfiguration,
|
||||||
Run: func() error {
|
Run: runBugCmd(traefikConfiguration),
|
||||||
var version bytes.Buffer
|
|
||||||
if err := getVersionPrint(&version); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpl, err := template.New("").Parse(bugTemplate)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
configJSON, err := json.MarshalIndent(traefikConfiguration, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
v := struct {
|
|
||||||
Version string
|
|
||||||
Configuration string
|
|
||||||
}{
|
|
||||||
Version: version.String(),
|
|
||||||
Configuration: anonymize(string(configJSON)),
|
|
||||||
}
|
|
||||||
|
|
||||||
var bug bytes.Buffer
|
|
||||||
if err := tmpl.Execute(&bug, v); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
body := bug.String()
|
|
||||||
URL := bugtracker + "?body=" + url.QueryEscape(body)
|
|
||||||
if err := openBrowser(URL); err != nil {
|
|
||||||
fmt.Printf("Please file a new issue at %s using this template:\n\n", bugtracker)
|
|
||||||
fmt.Print(body)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
Metadata: map[string]string{
|
Metadata: map[string]string{
|
||||||
"parseAllSources": "true",
|
"parseAllSources": "true",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func runBugCmd(traefikConfiguration interface{}) func() error {
|
||||||
|
return func() error {
|
||||||
|
|
||||||
|
body, err := createBugReport(traefikConfiguration)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
sendBugReport(body)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createBugReport(traefikConfiguration interface{}) (string, error) {
|
||||||
|
var version bytes.Buffer
|
||||||
|
if err := getVersionPrint(&version); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
tmpl, err := template.New("bug").Parse(bugTemplate)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := anonymize.Do(&traefikConfiguration, true)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
v := struct {
|
||||||
|
Version string
|
||||||
|
Configuration string
|
||||||
|
}{
|
||||||
|
Version: version.String(),
|
||||||
|
Configuration: config,
|
||||||
|
}
|
||||||
|
|
||||||
|
var bug bytes.Buffer
|
||||||
|
if err := tmpl.Execute(&bug, v); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return bug.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func sendBugReport(body string) {
|
||||||
|
URL := bugTracker + "?body=" + url.QueryEscape(body)
|
||||||
|
if err := openBrowser(URL); err != nil {
|
||||||
|
fmt.Printf("Please file a new issue at %s using this template:\n\n", bugTracker)
|
||||||
|
fmt.Print(body)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func openBrowser(URL string) error {
|
func openBrowser(URL string) error {
|
||||||
var err error
|
var err error
|
||||||
switch runtime.GOOS {
|
switch runtime.GOOS {
|
||||||
|
@ -152,9 +167,3 @@ func openBrowser(URL string) error {
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func anonymize(input string) string {
|
|
||||||
replace := "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx\""
|
|
||||||
mailExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
|
|
||||||
return xurls.Relaxed.ReplaceAllString(mailExp.ReplaceAllString(input, replace), replace)
|
|
||||||
}
|
|
||||||
|
|
|
@ -2,192 +2,48 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"github.com/containous/traefik/cmd/traefik/anonymize"
|
||||||
|
"github.com/containous/traefik/configuration"
|
||||||
|
"github.com/containous/traefik/provider/file"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Test_anonymize(t *testing.T) {
|
func Test_createBugReport(t *testing.T) {
|
||||||
baseConfiguration := `
|
traefikConfiguration := TraefikConfiguration{
|
||||||
{
|
ConfigFile: "FOO",
|
||||||
"GraceTimeOut": 10000000000,
|
GlobalConfiguration: configuration.GlobalConfiguration{
|
||||||
"Debug": false,
|
EntryPoints: configuration.EntryPoints{
|
||||||
"CheckNewVersion": true,
|
"goo": &configuration.EntryPoint{
|
||||||
"AccessLogsFile": "",
|
Address: "hoo.bar",
|
||||||
"TraefikLogsFile": "",
|
},
|
||||||
"LogLevel": "ERROR",
|
},
|
||||||
"EntryPoints": {
|
File: &file.Provider{
|
||||||
"http": {
|
Directory: "BAR",
|
||||||
"Network": "",
|
},
|
||||||
"Address": ":80",
|
RootCAs: configuration.RootCAs{"fllf"},
|
||||||
"TLS": null,
|
},
|
||||||
"Redirect": {
|
|
||||||
"EntryPoint": "https",
|
|
||||||
"Regex": "",
|
|
||||||
"Replacement": ""
|
|
||||||
},
|
|
||||||
"Auth": null,
|
|
||||||
"Compress": false
|
|
||||||
},
|
|
||||||
"https": {
|
|
||||||
"Network": "",
|
|
||||||
"Address": ":443",
|
|
||||||
"TLS": {
|
|
||||||
"MinVersion": "",
|
|
||||||
"CipherSuites": null,
|
|
||||||
"Certificates": null,
|
|
||||||
"ClientCAFiles": null
|
|
||||||
},
|
|
||||||
"Redirect": null,
|
|
||||||
"Auth": null,
|
|
||||||
"Compress": false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"Cluster": null,
|
|
||||||
"Constraints": [],
|
|
||||||
"ACME": {
|
|
||||||
"Email": "foo@bar.com",
|
|
||||||
"Domains": [
|
|
||||||
{
|
|
||||||
"Main": "foo@bar.com",
|
|
||||||
"SANs": null
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Main": "foo@bar.com",
|
|
||||||
"SANs": null
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"Storage": "",
|
|
||||||
"StorageFile": "/acme/acme.json",
|
|
||||||
"OnDemand": true,
|
|
||||||
"OnHostRule": true,
|
|
||||||
"CAServer": "",
|
|
||||||
"EntryPoint": "https",
|
|
||||||
"DNSProvider": "",
|
|
||||||
"DelayDontCheckDNS": 0,
|
|
||||||
"ACMELogging": false,
|
|
||||||
"TLSConfig": null
|
|
||||||
},
|
|
||||||
"DefaultEntryPoints": [
|
|
||||||
"https",
|
|
||||||
"http"
|
|
||||||
],
|
|
||||||
"ProvidersThrottleDuration": 2000000000,
|
|
||||||
"MaxIdleConnsPerHost": 200,
|
|
||||||
"IdleTimeout": 180000000000,
|
|
||||||
"InsecureSkipVerify": false,
|
|
||||||
"Retry": null,
|
|
||||||
"HealthCheck": {
|
|
||||||
"Interval": 30000000000
|
|
||||||
},
|
|
||||||
"Docker": null,
|
|
||||||
"File": null,
|
|
||||||
"Web": null,
|
|
||||||
"Marathon": null,
|
|
||||||
"Consul": null,
|
|
||||||
"ConsulCatalog": null,
|
|
||||||
"Etcd": null,
|
|
||||||
"Zookeeper": null,
|
|
||||||
"Boltdb": null,
|
|
||||||
"Kubernetes": null,
|
|
||||||
"Mesos": null,
|
|
||||||
"Eureka": null,
|
|
||||||
"ECS": null,
|
|
||||||
"Rancher": null,
|
|
||||||
"DynamoDB": null,
|
|
||||||
"ConfigFile": "/etc/traefik/traefik.toml"
|
|
||||||
}
|
|
||||||
`
|
|
||||||
expectedConfiguration := `
|
|
||||||
{
|
|
||||||
"GraceTimeOut": 10000000000,
|
|
||||||
"Debug": false,
|
|
||||||
"CheckNewVersion": true,
|
|
||||||
"AccessLogsFile": "",
|
|
||||||
"TraefikLogsFile": "",
|
|
||||||
"LogLevel": "ERROR",
|
|
||||||
"EntryPoints": {
|
|
||||||
"http": {
|
|
||||||
"Network": "",
|
|
||||||
"Address": ":80",
|
|
||||||
"TLS": null,
|
|
||||||
"Redirect": {
|
|
||||||
"EntryPoint": "https",
|
|
||||||
"Regex": "",
|
|
||||||
"Replacement": ""
|
|
||||||
},
|
|
||||||
"Auth": null,
|
|
||||||
"Compress": false
|
|
||||||
},
|
|
||||||
"https": {
|
|
||||||
"Network": "",
|
|
||||||
"Address": ":443",
|
|
||||||
"TLS": {
|
|
||||||
"MinVersion": "",
|
|
||||||
"CipherSuites": null,
|
|
||||||
"Certificates": null,
|
|
||||||
"ClientCAFiles": null
|
|
||||||
},
|
|
||||||
"Redirect": null,
|
|
||||||
"Auth": null,
|
|
||||||
"Compress": false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"Cluster": null,
|
|
||||||
"Constraints": [],
|
|
||||||
"ACME": {
|
|
||||||
"Email": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
|
||||||
"Domains": [
|
|
||||||
{
|
|
||||||
"Main": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
|
||||||
"SANs": null
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"Main": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
|
|
||||||
"SANs": null
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"Storage": "",
|
|
||||||
"StorageFile": "/acme/acme.json",
|
|
||||||
"OnDemand": true,
|
|
||||||
"OnHostRule": true,
|
|
||||||
"CAServer": "",
|
|
||||||
"EntryPoint": "https",
|
|
||||||
"DNSProvider": "",
|
|
||||||
"DelayDontCheckDNS": 0,
|
|
||||||
"ACMELogging": false,
|
|
||||||
"TLSConfig": null
|
|
||||||
},
|
|
||||||
"DefaultEntryPoints": [
|
|
||||||
"https",
|
|
||||||
"http"
|
|
||||||
],
|
|
||||||
"ProvidersThrottleDuration": 2000000000,
|
|
||||||
"MaxIdleConnsPerHost": 200,
|
|
||||||
"IdleTimeout": 180000000000,
|
|
||||||
"InsecureSkipVerify": false,
|
|
||||||
"Retry": null,
|
|
||||||
"HealthCheck": {
|
|
||||||
"Interval": 30000000000
|
|
||||||
},
|
|
||||||
"Docker": null,
|
|
||||||
"File": null,
|
|
||||||
"Web": null,
|
|
||||||
"Marathon": null,
|
|
||||||
"Consul": null,
|
|
||||||
"ConsulCatalog": null,
|
|
||||||
"Etcd": null,
|
|
||||||
"Zookeeper": null,
|
|
||||||
"Boltdb": null,
|
|
||||||
"Kubernetes": null,
|
|
||||||
"Mesos": null,
|
|
||||||
"Eureka": null,
|
|
||||||
"ECS": null,
|
|
||||||
"Rancher": null,
|
|
||||||
"DynamoDB": null,
|
|
||||||
"ConfigFile": "/etc/traefik/traefik.toml"
|
|
||||||
}
|
|
||||||
`
|
|
||||||
anomConfiguration := anonymize(baseConfiguration)
|
|
||||||
|
|
||||||
if anomConfiguration != expectedConfiguration {
|
|
||||||
t.Errorf("Got %s, want %s.", anomConfiguration, expectedConfiguration)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
report, err := createBugReport(traefikConfiguration)
|
||||||
|
assert.NoError(t, err, report)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_anonymize_traefikConfiguration(t *testing.T) {
|
||||||
|
traefikConfiguration := &TraefikConfiguration{
|
||||||
|
ConfigFile: "FOO",
|
||||||
|
GlobalConfiguration: configuration.GlobalConfiguration{
|
||||||
|
EntryPoints: configuration.EntryPoints{
|
||||||
|
"goo": &configuration.EntryPoint{
|
||||||
|
Address: "hoo.bar",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
File: &file.Provider{
|
||||||
|
Directory: "BAR",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_, err := anonymize.Do(traefikConfiguration, true)
|
||||||
|
assert.NoError(t, err)
|
||||||
|
assert.Equal(t, "hoo.bar", traefikConfiguration.GlobalConfiguration.EntryPoints["goo"].Address)
|
||||||
}
|
}
|
||||||
|
|
|
@ -25,8 +25,8 @@ import (
|
||||||
|
|
||||||
// TraefikConfiguration holds GlobalConfiguration and other stuff
|
// TraefikConfiguration holds GlobalConfiguration and other stuff
|
||||||
type TraefikConfiguration struct {
|
type TraefikConfiguration struct {
|
||||||
configuration.GlobalConfiguration `mapstructure:",squash"`
|
configuration.GlobalConfiguration `mapstructure:",squash" export:"true"`
|
||||||
ConfigFile string `short:"c" description:"Configuration file to use (TOML)."`
|
ConfigFile string `short:"c" description:"Configuration file to use (TOML)." export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewTraefikDefaultPointersConfiguration creates a TraefikConfiguration with pointers default values
|
// NewTraefikDefaultPointersConfiguration creates a TraefikConfiguration with pointers default values
|
||||||
|
|
|
@ -24,11 +24,11 @@ import (
|
||||||
"github.com/containous/traefik/provider/kubernetes"
|
"github.com/containous/traefik/provider/kubernetes"
|
||||||
"github.com/containous/traefik/safe"
|
"github.com/containous/traefik/safe"
|
||||||
"github.com/containous/traefik/server"
|
"github.com/containous/traefik/server"
|
||||||
|
"github.com/containous/traefik/server/uuid"
|
||||||
"github.com/containous/traefik/types"
|
"github.com/containous/traefik/types"
|
||||||
"github.com/containous/traefik/version"
|
"github.com/containous/traefik/version"
|
||||||
"github.com/coreos/go-systemd/daemon"
|
"github.com/coreos/go-systemd/daemon"
|
||||||
"github.com/docker/libkv/store"
|
"github.com/docker/libkv/store"
|
||||||
"github.com/satori/go.uuid"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
@ -187,7 +187,7 @@ Complete documentation is available at https://traefik.io`,
|
||||||
s.AddSource(toml)
|
s.AddSource(toml)
|
||||||
s.AddSource(f)
|
s.AddSource(f)
|
||||||
if _, err := s.LoadConfig(); err != nil {
|
if _, err := s.LoadConfig(); err != nil {
|
||||||
fmtlog.Println(fmt.Errorf("Error reading TOML config file %s : %s", toml.ConfigFileUsed(), err))
|
fmtlog.Printf("Error reading TOML config file %s : %s\n", toml.ConfigFileUsed(), err)
|
||||||
os.Exit(-1)
|
os.Exit(-1)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -202,7 +202,7 @@ Complete documentation is available at https://traefik.io`,
|
||||||
// IF a KV Store is enable and no sub-command called in args
|
// IF a KV Store is enable and no sub-command called in args
|
||||||
if kv != nil && usedCmd == traefikCmd {
|
if kv != nil && usedCmd == traefikCmd {
|
||||||
if traefikConfiguration.Cluster == nil {
|
if traefikConfiguration.Cluster == nil {
|
||||||
traefikConfiguration.Cluster = &types.Cluster{Node: uuid.NewV4().String()}
|
traefikConfiguration.Cluster = &types.Cluster{Node: uuid.Get()}
|
||||||
}
|
}
|
||||||
if traefikConfiguration.Cluster.Store == nil {
|
if traefikConfiguration.Cluster.Store == nil {
|
||||||
traefikConfiguration.Cluster.Store = &types.Store{Prefix: kv.Prefix, Store: kv.Store}
|
traefikConfiguration.Cluster.Store = &types.Store{Prefix: kv.Prefix, Store: kv.Store}
|
||||||
|
@ -279,16 +279,7 @@ func run(globalConfiguration *configuration.GlobalConfiguration) {
|
||||||
log.Infof("Traefik version %s built on %s", version.Version, version.BuildDate)
|
log.Infof("Traefik version %s built on %s", version.Version, version.BuildDate)
|
||||||
|
|
||||||
if globalConfiguration.CheckNewVersion {
|
if globalConfiguration.CheckNewVersion {
|
||||||
ticker := time.NewTicker(24 * time.Hour)
|
checkNewVersion()
|
||||||
safe.Go(func() {
|
|
||||||
version.CheckNewVersion()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
version.CheckNewVersion()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("Global configuration loaded %s", string(jsonConf))
|
log.Debugf("Global configuration loaded %s", string(jsonConf))
|
||||||
|
@ -354,3 +345,16 @@ func CreateKvSource(traefikConfiguration *TraefikConfiguration) (*staert.KvSourc
|
||||||
}
|
}
|
||||||
return kv, err
|
return kv, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func checkNewVersion() {
|
||||||
|
ticker := time.NewTicker(24 * time.Hour)
|
||||||
|
safe.Go(func() {
|
||||||
|
version.CheckNewVersion()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ticker.C:
|
||||||
|
version.CheckNewVersion()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
|
@ -47,44 +47,44 @@ const (
|
||||||
// GlobalConfiguration holds global configuration (with providers, etc.).
|
// GlobalConfiguration holds global configuration (with providers, etc.).
|
||||||
// It's populated from the traefik configuration file passed as an argument to the binary.
|
// It's populated from the traefik configuration file passed as an argument to the binary.
|
||||||
type GlobalConfiguration struct {
|
type GlobalConfiguration struct {
|
||||||
LifeCycle *LifeCycle `description:"Timeouts influencing the server life cycle"`
|
LifeCycle *LifeCycle `description:"Timeouts influencing the server life cycle" export:"true"`
|
||||||
GraceTimeOut flaeg.Duration `short:"g" description:"(Deprecated) Duration to give active requests a chance to finish before Traefik stops"` // Deprecated
|
GraceTimeOut flaeg.Duration `short:"g" description:"(Deprecated) Duration to give active requests a chance to finish before Traefik stops" export:"true"` // Deprecated
|
||||||
Debug bool `short:"d" description:"Enable debug mode"`
|
Debug bool `short:"d" description:"Enable debug mode" export:"true"`
|
||||||
CheckNewVersion bool `description:"Periodically check if a new version has been released"`
|
CheckNewVersion bool `description:"Periodically check if a new version has been released" export:"true"`
|
||||||
AccessLogsFile string `description:"(Deprecated) Access logs file"` // Deprecated
|
AccessLogsFile string `description:"(Deprecated) Access logs file" export:"true"` // Deprecated
|
||||||
AccessLog *types.AccessLog `description:"Access log settings"`
|
AccessLog *types.AccessLog `description:"Access log settings" export:"true"`
|
||||||
TraefikLogsFile string `description:"(Deprecated) Traefik logs file. Stdout is used when omitted or empty"` // Deprecated
|
TraefikLogsFile string `description:"(Deprecated) Traefik logs file. Stdout is used when omitted or empty" export:"true"` // Deprecated
|
||||||
TraefikLog *types.TraefikLog `description:"Traefik log settings"`
|
TraefikLog *types.TraefikLog `description:"Traefik log settings" export:"true"`
|
||||||
LogLevel string `short:"l" description:"Log level"`
|
LogLevel string `short:"l" description:"Log level" export:"true"`
|
||||||
EntryPoints EntryPoints `description:"Entrypoints definition using format: --entryPoints='Name:http Address::8000 Redirect.EntryPoint:https' --entryPoints='Name:https Address::4442 TLS:tests/traefik.crt,tests/traefik.key;prod/traefik.crt,prod/traefik.key'"`
|
EntryPoints EntryPoints `description:"Entrypoints definition using format: --entryPoints='Name:http Address::8000 Redirect.EntryPoint:https' --entryPoints='Name:https Address::4442 TLS:tests/traefik.crt,tests/traefik.key;prod/traefik.crt,prod/traefik.key'" export:"true"`
|
||||||
Cluster *types.Cluster `description:"Enable clustering"`
|
Cluster *types.Cluster `description:"Enable clustering" export:"true"`
|
||||||
Constraints types.Constraints `description:"Filter services by constraint, matching with service tags"`
|
Constraints types.Constraints `description:"Filter services by constraint, matching with service tags" export:"true"`
|
||||||
ACME *acme.ACME `description:"Enable ACME (Let's Encrypt): automatic SSL"`
|
ACME *acme.ACME `description:"Enable ACME (Let's Encrypt): automatic SSL" export:"true"`
|
||||||
DefaultEntryPoints DefaultEntryPoints `description:"Entrypoints to be used by frontends that do not specify any entrypoint"`
|
DefaultEntryPoints DefaultEntryPoints `description:"Entrypoints to be used by frontends that do not specify any entrypoint" export:"true"`
|
||||||
ProvidersThrottleDuration flaeg.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time."`
|
ProvidersThrottleDuration flaeg.Duration `description:"Backends throttle duration: minimum duration between 2 events from providers before applying a new configuration. It avoids unnecessary reloads if multiples events are sent in a short amount of time." export:"true"`
|
||||||
MaxIdleConnsPerHost int `description:"If non-zero, controls the maximum idle (keep-alive) to keep per-host. If zero, DefaultMaxIdleConnsPerHost is used"`
|
MaxIdleConnsPerHost int `description:"If non-zero, controls the maximum idle (keep-alive) to keep per-host. If zero, DefaultMaxIdleConnsPerHost is used" export:"true"`
|
||||||
IdleTimeout flaeg.Duration `description:"(Deprecated) maximum amount of time an idle (keep-alive) connection will remain idle before closing itself."` // Deprecated
|
IdleTimeout flaeg.Duration `description:"(Deprecated) maximum amount of time an idle (keep-alive) connection will remain idle before closing itself." export:"true"` // Deprecated
|
||||||
InsecureSkipVerify bool `description:"Disable SSL certificate verification"`
|
InsecureSkipVerify bool `description:"Disable SSL certificate verification" export:"true"`
|
||||||
RootCAs RootCAs `description:"Add cert file for self-signed certicate"`
|
RootCAs RootCAs `description:"Add cert file for self-signed certificate"`
|
||||||
Retry *Retry `description:"Enable retry sending request if network error"`
|
Retry *Retry `description:"Enable retry sending request if network error" export:"true"`
|
||||||
HealthCheck *HealthCheckConfig `description:"Health check parameters"`
|
HealthCheck *HealthCheckConfig `description:"Health check parameters" export:"true"`
|
||||||
RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance"`
|
RespondingTimeouts *RespondingTimeouts `description:"Timeouts for incoming requests to the Traefik instance" export:"true"`
|
||||||
ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers"`
|
ForwardingTimeouts *ForwardingTimeouts `description:"Timeouts for requests forwarded to the backend servers" export:"true"`
|
||||||
Docker *docker.Provider `description:"Enable Docker backend with default settings"`
|
Docker *docker.Provider `description:"Enable Docker backend with default settings" export:"true"`
|
||||||
File *file.Provider `description:"Enable File backend with default settings"`
|
File *file.Provider `description:"Enable File backend with default settings" export:"true"`
|
||||||
Web *web.Provider `description:"Enable Web backend with default settings"`
|
Web *web.Provider `description:"Enable Web backend with default settings" export:"true"`
|
||||||
Marathon *marathon.Provider `description:"Enable Marathon backend with default settings"`
|
Marathon *marathon.Provider `description:"Enable Marathon backend with default settings" export:"true"`
|
||||||
Consul *consul.Provider `description:"Enable Consul backend with default settings"`
|
Consul *consul.Provider `description:"Enable Consul backend with default settings" export:"true"`
|
||||||
ConsulCatalog *consul.CatalogProvider `description:"Enable Consul catalog backend with default settings"`
|
ConsulCatalog *consul.CatalogProvider `description:"Enable Consul catalog backend with default settings" export:"true"`
|
||||||
Etcd *etcd.Provider `description:"Enable Etcd backend with default settings"`
|
Etcd *etcd.Provider `description:"Enable Etcd backend with default settings" export:"true"`
|
||||||
Zookeeper *zk.Provider `description:"Enable Zookeeper backend with default settings"`
|
Zookeeper *zk.Provider `description:"Enable Zookeeper backend with default settings" export:"true"`
|
||||||
Boltdb *boltdb.Provider `description:"Enable Boltdb backend with default settings"`
|
Boltdb *boltdb.Provider `description:"Enable Boltdb backend with default settings" export:"true"`
|
||||||
Kubernetes *kubernetes.Provider `description:"Enable Kubernetes backend with default settings"`
|
Kubernetes *kubernetes.Provider `description:"Enable Kubernetes backend with default settings" export:"true"`
|
||||||
Mesos *mesos.Provider `description:"Enable Mesos backend with default settings"`
|
Mesos *mesos.Provider `description:"Enable Mesos backend with default settings" export:"true"`
|
||||||
Eureka *eureka.Provider `description:"Enable Eureka backend with default settings"`
|
Eureka *eureka.Provider `description:"Enable Eureka backend with default settings" export:"true"`
|
||||||
ECS *ecs.Provider `description:"Enable ECS backend with default settings"`
|
ECS *ecs.Provider `description:"Enable ECS backend with default settings" export:"true"`
|
||||||
Rancher *rancher.Provider `description:"Enable Rancher backend with default settings"`
|
Rancher *rancher.Provider `description:"Enable Rancher backend with default settings" export:"true"`
|
||||||
DynamoDB *dynamodb.Provider `description:"Enable DynamoDB backend with default settings"`
|
DynamoDB *dynamodb.Provider `description:"Enable DynamoDB backend with default settings" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetEffectiveConfiguration adds missing configuration parameters derived from
|
// SetEffectiveConfiguration adds missing configuration parameters derived from
|
||||||
|
@ -342,12 +342,12 @@ func (ep *EntryPoints) Type() string {
|
||||||
type EntryPoint struct {
|
type EntryPoint struct {
|
||||||
Network string
|
Network string
|
||||||
Address string
|
Address string
|
||||||
TLS *TLS
|
TLS *TLS `export:"true"`
|
||||||
Redirect *Redirect
|
Redirect *Redirect `export:"true"`
|
||||||
Auth *types.Auth
|
Auth *types.Auth `export:"true"`
|
||||||
WhitelistSourceRange []string
|
WhitelistSourceRange []string
|
||||||
Compress bool
|
Compress bool `export:"true"`
|
||||||
ProxyProtocol bool
|
ProxyProtocol bool `export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Redirect configures a redirection of an entry point to another, or to an URL
|
// Redirect configures a redirection of an entry point to another, or to an URL
|
||||||
|
@ -359,7 +359,7 @@ type Redirect struct {
|
||||||
|
|
||||||
// TLS configures TLS for an entry point
|
// TLS configures TLS for an entry point
|
||||||
type TLS struct {
|
type TLS struct {
|
||||||
MinVersion string
|
MinVersion string `export:"true"`
|
||||||
CipherSuites []string
|
CipherSuites []string
|
||||||
Certificates Certificates
|
Certificates Certificates
|
||||||
ClientCAFiles []string
|
ClientCAFiles []string
|
||||||
|
@ -409,8 +409,6 @@ func (certs *Certificates) CreateTLSConfig() (*tls.Config, error) {
|
||||||
config.Certificates = []tls.Certificate{}
|
config.Certificates = []tls.Certificate{}
|
||||||
certsSlice := []Certificate(*certs)
|
certsSlice := []Certificate(*certs)
|
||||||
for _, v := range certsSlice {
|
for _, v := range certsSlice {
|
||||||
cert := tls.Certificate{}
|
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
certContent, err := v.CertFile.Read()
|
certContent, err := v.CertFile.Read()
|
||||||
|
@ -423,7 +421,7 @@ func (certs *Certificates) CreateTLSConfig() (*tls.Config, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
cert, err = tls.X509KeyPair(certContent, keyContent)
|
cert, err := tls.X509KeyPair(certContent, keyContent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -478,25 +476,25 @@ type Certificate struct {
|
||||||
|
|
||||||
// Retry contains request retry config
|
// Retry contains request retry config
|
||||||
type Retry struct {
|
type Retry struct {
|
||||||
Attempts int `description:"Number of attempts"`
|
Attempts int `description:"Number of attempts" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// HealthCheckConfig contains health check configuration parameters.
|
// HealthCheckConfig contains health check configuration parameters.
|
||||||
type HealthCheckConfig struct {
|
type HealthCheckConfig struct {
|
||||||
Interval flaeg.Duration `description:"Default periodicity of enabled health checks"`
|
Interval flaeg.Duration `description:"Default periodicity of enabled health checks" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// RespondingTimeouts contains timeout configurations for incoming requests to the Traefik instance.
|
// RespondingTimeouts contains timeout configurations for incoming requests to the Traefik instance.
|
||||||
type RespondingTimeouts struct {
|
type RespondingTimeouts struct {
|
||||||
ReadTimeout flaeg.Duration `description:"ReadTimeout is the maximum duration for reading the entire request, including the body. If zero, no timeout is set"`
|
ReadTimeout flaeg.Duration `description:"ReadTimeout is the maximum duration for reading the entire request, including the body. If zero, no timeout is set" export:"true"`
|
||||||
WriteTimeout flaeg.Duration `description:"WriteTimeout is the maximum duration before timing out writes of the response. If zero, no timeout is set"`
|
WriteTimeout flaeg.Duration `description:"WriteTimeout is the maximum duration before timing out writes of the response. If zero, no timeout is set" export:"true"`
|
||||||
IdleTimeout flaeg.Duration `description:"IdleTimeout is the maximum amount duration an idle (keep-alive) connection will remain idle before closing itself. Defaults to 180 seconds. If zero, no timeout is set"`
|
IdleTimeout flaeg.Duration `description:"IdleTimeout is the maximum amount duration an idle (keep-alive) connection will remain idle before closing itself. Defaults to 180 seconds. If zero, no timeout is set" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ForwardingTimeouts contains timeout configurations for forwarding requests to the backend servers.
|
// ForwardingTimeouts contains timeout configurations for forwarding requests to the backend servers.
|
||||||
type ForwardingTimeouts struct {
|
type ForwardingTimeouts struct {
|
||||||
DialTimeout flaeg.Duration `description:"The amount of time to wait until a connection to a backend server can be established. Defaults to 30 seconds. If zero, no timeout exists"`
|
DialTimeout flaeg.Duration `description:"The amount of time to wait until a connection to a backend server can be established. Defaults to 30 seconds. If zero, no timeout exists" export:"true"`
|
||||||
ResponseHeaderTimeout flaeg.Duration `description:"The amount of time to wait for a server's response headers after fully writing the request (including its body, if any). If zero, no timeout exists"`
|
ResponseHeaderTimeout flaeg.Duration `description:"The amount of time to wait for a server's response headers after fully writing the request (including its body, if any). If zero, no timeout exists" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LifeCycle contains configurations relevant to the lifecycle (such as the
|
// LifeCycle contains configurations relevant to the lifecycle (such as the
|
||||||
|
|
|
@ -29,7 +29,9 @@ address = ":8080"
|
||||||
# Set REST API to read-only mode.
|
# Set REST API to read-only mode.
|
||||||
#
|
#
|
||||||
# Optional
|
# Optional
|
||||||
# readOnly = false
|
# Default: false
|
||||||
|
#
|
||||||
|
readOnly = true
|
||||||
```
|
```
|
||||||
|
|
||||||
## Web UI
|
## Web UI
|
||||||
|
|
|
@ -171,6 +171,12 @@ To enable compression support using gzip format.
|
||||||
compress = true
|
compress = true
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Responses are compressed when:
|
||||||
|
|
||||||
|
* The response body is larger than `512` bytes
|
||||||
|
* And the `Accept-Encoding` request header contains `gzip`
|
||||||
|
* And the response is not already compressed, i.e. the `Content-Encoding` response header is not already set.
|
||||||
|
|
||||||
## Whitelisting
|
## Whitelisting
|
||||||
|
|
||||||
To enable IP whitelisting at the entrypoint level.
|
To enable IP whitelisting at the entrypoint level.
|
||||||
|
|
|
@ -14,7 +14,7 @@ This section explains how to use Traefik as reverse proxy for gRPC application w
|
||||||
In order to secure the gRPC server, we generate a self-signed certificate for backend url:
|
In order to secure the gRPC server, we generate a self-signed certificate for backend url:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./backend.key -out ./backend.crt
|
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./backend.key -out ./backend.cert
|
||||||
```
|
```
|
||||||
|
|
||||||
That will prompt for information, the important answer is:
|
That will prompt for information, the important answer is:
|
||||||
|
@ -28,7 +28,7 @@ Common Name (e.g. server FQDN or YOUR name) []: backend.local
|
||||||
Generate your self-signed certificate for frontend url:
|
Generate your self-signed certificate for frontend url:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./frontend.key -out ./frontend.crt
|
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ./frontend.key -out ./frontend.cert
|
||||||
```
|
```
|
||||||
|
|
||||||
with
|
with
|
||||||
|
@ -93,13 +93,13 @@ So we modify the "gRPC server example" to use our own self-signed certificate:
|
||||||
// ...
|
// ...
|
||||||
|
|
||||||
// Read cert and key file
|
// Read cert and key file
|
||||||
BackendCert := ioutil.ReadFile("./backend.cert")
|
BackendCert, _ := ioutil.ReadFile("./backend.cert")
|
||||||
BackendKey := ioutil.ReadFile("./backend.key")
|
BackendKey, _ := ioutil.ReadFile("./backend.key")
|
||||||
|
|
||||||
// Generate Certificate struct
|
// Generate Certificate struct
|
||||||
cert, err := tls.X509KeyPair(BackendCert, BackendKey)
|
cert, err := tls.X509KeyPair(BackendCert, BackendKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatalf("failed to parse certificate: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create credentials
|
// Create credentials
|
||||||
|
@ -110,7 +110,7 @@ serverOption := grpc.Creds(creds)
|
||||||
var s *grpc.Server = grpc.NewServer(serverOption)
|
var s *grpc.Server = grpc.NewServer(serverOption)
|
||||||
defer s.Stop()
|
defer s.Stop()
|
||||||
|
|
||||||
helloworld.RegisterGreeterServer(s, &myserver{})
|
pb.RegisterGreeterServer(s, &server{})
|
||||||
err := s.Serve(lis)
|
err := s.Serve(lis)
|
||||||
|
|
||||||
// ...
|
// ...
|
||||||
|
@ -122,7 +122,7 @@ Next we will modify gRPC Client to use our Træfik self-signed certificate:
|
||||||
// ...
|
// ...
|
||||||
|
|
||||||
// Read cert file
|
// Read cert file
|
||||||
FrontendCert := ioutil.ReadFile("./frontend.cert")
|
FrontendCert, _ := ioutil.ReadFile("./frontend.cert")
|
||||||
|
|
||||||
// Create CertPool
|
// Create CertPool
|
||||||
roots := x509.NewCertPool()
|
roots := x509.NewCertPool()
|
||||||
|
@ -132,16 +132,16 @@ roots.AppendCertsFromPEM(FrontendCert)
|
||||||
credsClient := credentials.NewClientTLSFromCert(roots, "")
|
credsClient := credentials.NewClientTLSFromCert(roots, "")
|
||||||
|
|
||||||
// Dial with specific Transport (with credentials)
|
// Dial with specific Transport (with credentials)
|
||||||
conn, err := grpc.Dial("https://frontend:4443", grpc.WithTransportCredentials(credsClient))
|
conn, err := grpc.Dial("frontend.local:4443", grpc.WithTransportCredentials(credsClient))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
log.Fatalf("did not connect: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
client := helloworld.NewGreeterClient(conn)
|
client := pb.NewGreeterClient(conn)
|
||||||
|
|
||||||
name := "World"
|
name := "World"
|
||||||
r, err := client.SayHello(context.Background(), &helloworld.HelloRequest{Name: name})
|
r, err := client.SayHello(context.Background(), &pb.HelloRequest{Name: name})
|
||||||
|
|
||||||
// ...
|
// ...
|
||||||
```
|
```
|
||||||
|
|
|
@ -20,7 +20,7 @@ We will see the steps to set it up with an easy example.
|
||||||
|
|
||||||
### docker-compose file for Consul
|
### docker-compose file for Consul
|
||||||
|
|
||||||
The Træfik global configuration will be getted from a [Consul](https://consul.io) store.
|
The Træfik global configuration will be retrieved from a [Consul](https://consul.io) store.
|
||||||
|
|
||||||
First we have to launch Consul in a container.
|
First we have to launch Consul in a container.
|
||||||
|
|
||||||
|
|
|
@ -7,14 +7,15 @@ The cluster consists of:
|
||||||
- 3 servers
|
- 3 servers
|
||||||
- 1 manager
|
- 1 manager
|
||||||
- 2 workers
|
- 2 workers
|
||||||
- 1 [overlay](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network) network
|
- 1 [overlay](https://docs.docker.com/engine/userguide/networking/dockernetworks/#an-overlay-network) network (multi-host networking)
|
||||||
(multi-host networking)
|
|
||||||
|
|
||||||
## Prerequisites
|
## Prerequisites
|
||||||
|
|
||||||
1. You will need to install [docker-machine](https://docs.docker.com/machine/)
|
1. You will need to install [docker-machine](https://docs.docker.com/machine/)
|
||||||
2. You will need the latest [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
|
2. You will need the latest [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
|
||||||
|
|
||||||
|
|
||||||
## Cluster provisioning
|
## Cluster provisioning
|
||||||
|
|
||||||
First, let's create all the required nodes.
|
First, let's create all the required nodes.
|
||||||
|
@ -26,7 +27,7 @@ docker-machine create -d virtualbox worker1
|
||||||
docker-machine create -d virtualbox worker2
|
docker-machine create -d virtualbox worker2
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, let's setup the cluster, in order :
|
Then, let's setup the cluster, in order:
|
||||||
|
|
||||||
1. initialize the cluster
|
1. initialize the cluster
|
||||||
1. get the token for other host to join
|
1. get the token for other host to join
|
||||||
|
@ -60,9 +61,9 @@ docker-machine ssh manager docker node ls
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
|
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS
|
||||||
2a770ov9vixeadep674265u1n worker1 Ready Active
|
013v16l1sbuwjqcn7ucbu4jwt worker1 Ready Active
|
||||||
dbi3or4q8ii8elbws70g4hkdh * manager Ready Active Leader
|
8buzkquycd17jqjber0mo2gn8 worker2 Ready Active
|
||||||
esbhhy6vnqv90xomjaomdgy46 worker2 Ready Active
|
fnpj8ozfc85zvahx2r540xfcf * manager Ready Active Leader
|
||||||
```
|
```
|
||||||
|
|
||||||
Finally, let's create a network for Træfik to use.
|
Finally, let's create a network for Træfik to use.
|
||||||
|
@ -71,11 +72,11 @@ Finally, let's create a network for Træfik to use.
|
||||||
docker-machine ssh manager "docker network create --driver=overlay traefik-net"
|
docker-machine ssh manager "docker network create --driver=overlay traefik-net"
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Deploy Træfik
|
## Deploy Træfik
|
||||||
|
|
||||||
Let's deploy Træfik as a docker service in our cluster.
|
Let's deploy Træfik as a docker service in our cluster.
|
||||||
The only requirement for Træfik to work with swarm mode is that it needs to run on a manager node — we are going to use a
|
The only requirement for Træfik to work with swarm mode is that it needs to run on a manager node - we are going to use a [constraint](https://docs.docker.com/engine/reference/commandline/service_create/#/specify-service-constraints-constraint) for that.
|
||||||
[constraint](https://docs.docker.com/engine/reference/commandline/service_create/#/specify-service-constraints-constraint) for that.
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker-machine ssh manager "docker service create \
|
docker-machine ssh manager "docker service create \
|
||||||
|
@ -103,6 +104,7 @@ Let's explain this command:
|
||||||
| `--docker` | enable docker backend, and `--docker.swarmmode` to enable the swarm mode on Træfik. |
|
| `--docker` | enable docker backend, and `--docker.swarmmode` to enable the swarm mode on Træfik. |
|
||||||
| `--web` | activate the webUI on port 8080 |
|
| `--web` | activate the webUI on port 8080 |
|
||||||
|
|
||||||
|
|
||||||
## Deploy your apps
|
## Deploy your apps
|
||||||
|
|
||||||
We can now deploy our app on the cluster, here [whoami](https://github.com/emilevauge/whoami), a simple web server in Go.
|
We can now deploy our app on the cluster, here [whoami](https://github.com/emilevauge/whoami), a simple web server in Go.
|
||||||
|
@ -124,7 +126,7 @@ docker-machine ssh manager "docker service create \
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
We set whoami1 to use sticky sessions (`--label traefik.backend.loadbalancer.sticky=true`).
|
We set `whoami1` to use sticky sessions (`--label traefik.backend.loadbalancer.sticky=true`).
|
||||||
We'll demonstrate that later.
|
We'll demonstrate that later.
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
|
@ -136,55 +138,52 @@ Check that everything is scheduled and started:
|
||||||
docker-machine ssh manager "docker service ls"
|
docker-machine ssh manager "docker service ls"
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
ID NAME REPLICAS IMAGE COMMAND
|
ID NAME MODE REPLICAS IMAGE PORTS
|
||||||
ab046gpaqtln whoami0 1/1 emilevauge/whoami
|
moq3dq4xqv6t traefik replicated 1/1 traefik:latest *:80->80/tcp,*:8080->8080/tcp
|
||||||
cgfg5ifzrpgm whoami1 1/1 emilevauge/whoami
|
ysil6oto1wim whoami0 replicated 1/1 emilevauge/whoami:latest
|
||||||
dtpl249tfghc traefik 1/1 traefik --docker --docker.swarmmode --docker.domain=traefik --docker.watch --web
|
z9re2mnl34k4 whoami1 replicated 1/1 emilevauge/whoami:latest
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## Access to your apps through Træfik
|
## Access to your apps through Træfik
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -H Host:whoami0.traefik http://$(docker-machine ip manager)
|
curl -H Host:whoami0.traefik http://$(docker-machine ip manager)
|
||||||
```
|
```
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: 8147a7746e7a
|
Hostname: 5b0b3d148359
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.8
|
||||||
IP: 10.0.9.3
|
IP: 10.0.0.4
|
||||||
IP: fe80::42:aff:fe00:903
|
IP: 172.18.0.5
|
||||||
IP: 172.18.0.3
|
|
||||||
IP: fe80::42:acff:fe12:3
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.3:80
|
Host: whoami0.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.2
|
||||||
X-Forwarded-Host: 10.0.9.3:80
|
X-Forwarded-Host: whoami0.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
```shell
|
```shell
|
||||||
curl -H Host:whoami1.traefik http://$(docker-machine ip manager)
|
curl -H Host:whoami1.traefik http://$(docker-machine ip manager)
|
||||||
```
|
```
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: ba2c21488299
|
Hostname: 3633163970f6
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.14
|
||||||
IP: 10.0.9.4
|
IP: 10.0.0.6
|
||||||
IP: fe80::42:aff:fe00:904
|
IP: 172.18.0.5
|
||||||
IP: 172.18.0.2
|
|
||||||
IP: fe80::42:acff:fe12:2
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.4:80
|
Host: whoami1.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.2
|
||||||
X-Forwarded-Host: 10.0.9.4:80
|
X-Forwarded-Host: whoami1.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
|
|
||||||
!!! note
|
!!! note
|
||||||
|
@ -194,43 +193,39 @@ X-Forwarded-Server: 8fbc39271b4c
|
||||||
curl -H Host:whoami0.traefik http://$(docker-machine ip worker1)
|
curl -H Host:whoami0.traefik http://$(docker-machine ip worker1)
|
||||||
```
|
```
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: 8147a7746e7a
|
Hostname: 5b0b3d148359
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.8
|
||||||
IP: 10.0.9.3
|
IP: 10.0.0.4
|
||||||
IP: fe80::42:aff:fe00:903
|
IP: 172.18.0.5
|
||||||
IP: 172.18.0.3
|
|
||||||
IP: fe80::42:acff:fe12:3
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.3:80
|
Host: whoami0.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.3
|
||||||
X-Forwarded-Host: 10.0.9.3:80
|
X-Forwarded-Host: whoami0.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
```shell
|
```shell
|
||||||
curl -H Host:whoami1.traefik http://$(docker-machine ip worker2)
|
curl -H Host:whoami1.traefik http://$(docker-machine ip worker2)
|
||||||
```
|
```
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: ba2c21488299
|
Hostname: 3633163970f6
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.14
|
||||||
IP: 10.0.9.4
|
IP: 10.0.0.6
|
||||||
IP: fe80::42:aff:fe00:904
|
IP: 172.18.0.5
|
||||||
IP: 172.18.0.2
|
|
||||||
IP: fe80::42:acff:fe12:2
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.4:80
|
Host: whoami1.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.4
|
||||||
X-Forwarded-Host: 10.0.9.4:80
|
X-Forwarded-Host: whoami1.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
|
|
||||||
## Scale both services
|
## Scale both services
|
||||||
|
@ -246,79 +241,93 @@ Check that we now have 5 replicas of each `whoami` service:
|
||||||
docker-machine ssh manager "docker service ls"
|
docker-machine ssh manager "docker service ls"
|
||||||
```
|
```
|
||||||
```
|
```
|
||||||
ID NAME REPLICAS IMAGE COMMAND
|
ID NAME MODE REPLICAS IMAGE PORTS
|
||||||
ab046gpaqtln whoami0 5/5 emilevauge/whoami
|
moq3dq4xqv6t traefik replicated 1/1 traefik:latest *:80->80/tcp,*:8080->8080/tcp
|
||||||
cgfg5ifzrpgm whoami1 5/5 emilevauge/whoami
|
ysil6oto1wim whoami0 replicated 5/5 emilevauge/whoami:latest
|
||||||
dtpl249tfghc traefik 1/1 traefik --docker --docker.swarmmode --docker.domain=traefik --docker.watch --web
|
z9re2mnl34k4 whoami1 replicated 5/5 emilevauge/whoami:latest
|
||||||
```
|
```
|
||||||
## Access to your whoami0 through Træfik multiple times.
|
|
||||||
|
## Access to your `whoami0` through Træfik multiple times.
|
||||||
|
|
||||||
Repeat the following command multiple times and note that the Hostname changes each time as Traefik load balances each request against the 5 tasks:
|
Repeat the following command multiple times and note that the Hostname changes each time as Traefik load balances each request against the 5 tasks:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -H Host:whoami0.traefik http://$(docker-machine ip manager)
|
curl -H Host:whoami0.traefik http://$(docker-machine ip manager)
|
||||||
```
|
```
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: 8147a7746e7a
|
Hostname: f3138d15b567
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.5
|
||||||
IP: 10.0.9.3
|
IP: 10.0.0.4
|
||||||
IP: fe80::42:aff:fe00:903
|
|
||||||
IP: 172.18.0.3
|
IP: 172.18.0.3
|
||||||
IP: fe80::42:acff:fe12:3
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.3:80
|
Host: whoami0.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.2
|
||||||
X-Forwarded-Host: 10.0.9.3:80
|
X-Forwarded-Host: whoami0.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
|
|
||||||
Do the same against whoami1:
|
Do the same against `whoami1`:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
curl -H Host:whoami1.traefik http://$(docker-machine ip manager)
|
curl -c cookies.txt -H Host:whoami1.traefik http://$(docker-machine ip manager)
|
||||||
```
|
```
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
Hostname: ba2c21488299
|
Hostname: 348e2f7bf432
|
||||||
IP: 127.0.0.1
|
IP: 127.0.0.1
|
||||||
IP: ::1
|
IP: 10.0.0.15
|
||||||
IP: 10.0.9.4
|
IP: 10.0.0.6
|
||||||
IP: fe80::42:aff:fe00:904
|
IP: 172.18.0.6
|
||||||
IP: 172.18.0.2
|
|
||||||
IP: fe80::42:acff:fe12:2
|
|
||||||
GET / HTTP/1.1
|
GET / HTTP/1.1
|
||||||
Host: 10.0.9.4:80
|
Host: whoami1.traefik
|
||||||
User-Agent: curl/7.35.0
|
User-Agent: curl/7.55.1
|
||||||
Accept: */*
|
Accept: */*
|
||||||
Accept-Encoding: gzip
|
Accept-Encoding: gzip
|
||||||
X-Forwarded-For: 192.168.99.1
|
X-Forwarded-For: 10.255.0.2
|
||||||
X-Forwarded-Host: 10.0.9.4:80
|
X-Forwarded-Host: whoami1.traefik
|
||||||
X-Forwarded-Proto: http
|
X-Forwarded-Proto: http
|
||||||
X-Forwarded-Server: 8fbc39271b4c
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
```
|
```
|
||||||
|
|
||||||
Wait, I thought we added the sticky flag to `whoami1`?
|
Because the sticky sessions require cookies to work, we used the `-c cookies.txt` option to store the cookie into a file.
|
||||||
Traefik relies on a cookie to maintain stickyness so you'll need to test this with a browser.
|
The cookie contains the IP of the container to which the session sticks:
|
||||||
|
|
||||||
First you need to add `whoami1.traefik` to your hosts file:
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
if [ -n "$(grep whoami1.traefik /etc/hosts)" ];
|
cat ./cookies.txt
|
||||||
then
|
```
|
||||||
echo "whoami1.traefik already exists (make sure the ip is current)";
|
```
|
||||||
else
|
# Netscape HTTP Cookie File
|
||||||
sudo -- sh -c -e "echo '$(docker-machine ip manager)\twhoami1.traefik' >> /etc/hosts";
|
# https://curl.haxx.se/docs/http-cookies.html
|
||||||
fi
|
# This file was generated by libcurl! Edit at your own risk.
|
||||||
|
|
||||||
|
whoami1.traefik FALSE / FALSE 0 _TRAEFIK_BACKEND http://10.0.0.15:80
|
||||||
```
|
```
|
||||||
|
|
||||||
Now open your browser and go to http://whoami1.traefik/
|
If you load the cookies file (`-b cookies.txt`) for the next request, you will see that stickyness is maintained:
|
||||||
|
|
||||||
You will now see that stickyness is maintained.
|
```shell
|
||||||
|
curl -b cookies.txt -H Host:whoami1.traefik http://$(docker-machine ip manager)
|
||||||
|
```
|
||||||
|
```yaml
|
||||||
|
Hostname: 348e2f7bf432
|
||||||
|
IP: 127.0.0.1
|
||||||
|
IP: 10.0.0.15
|
||||||
|
IP: 10.0.0.6
|
||||||
|
IP: 172.18.0.6
|
||||||
|
GET / HTTP/1.1
|
||||||
|
Host: whoami1.traefik
|
||||||
|
User-Agent: curl/7.55.1
|
||||||
|
Accept: */*
|
||||||
|
Accept-Encoding: gzip
|
||||||
|
Cookie: _TRAEFIK_BACKEND=http://10.0.0.15:80
|
||||||
|
X-Forwarded-For: 10.255.0.2
|
||||||
|
X-Forwarded-Host: whoami1.traefik
|
||||||
|
X-Forwarded-Proto: http
|
||||||
|
X-Forwarded-Server: 77fc29c69fe4
|
||||||
|
```
|
||||||
|
|
||||||
![](https://i.giphy.com/ujUdrdpX7Ok5W.gif)
|
![](https://i.giphy.com/ujUdrdpX7Ok5W.gif)
|
||||||
|
|
10
glide.lock
generated
10
glide.lock
generated
|
@ -1,5 +1,5 @@
|
||||||
hash: f5dd83cd0bcf9f38bf6916bc028e108c59aee57ea440e914bc68f2b90da227d3
|
hash: 6881e0574a026dde78c9d7f03a4aa56f6c6dc585d2b6d3e6f4ae1a94810b7f88
|
||||||
updated: 2017-09-09T11:52:16.848940186+02:00
|
updated: 2017-10-02T18:32:16.848940186+02:00
|
||||||
imports:
|
imports:
|
||||||
- name: cloud.google.com/go
|
- name: cloud.google.com/go
|
||||||
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
|
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
|
||||||
|
@ -374,8 +374,12 @@ imports:
|
||||||
version: f533f7a102197536779ea3a8cb881d639e21ec5a
|
version: f533f7a102197536779ea3a8cb881d639e21ec5a
|
||||||
- name: github.com/miekg/dns
|
- name: github.com/miekg/dns
|
||||||
version: 8060d9f51305bbe024b99679454e62f552cd0b0b
|
version: 8060d9f51305bbe024b99679454e62f552cd0b0b
|
||||||
|
- name: github.com/mitchellh/copystructure
|
||||||
|
version: d23ffcb85de31694d6ccaa23ccb4a03e55c1303f
|
||||||
- name: github.com/mitchellh/mapstructure
|
- name: github.com/mitchellh/mapstructure
|
||||||
version: d0303fe809921458f417bcf828397a65db30a7e4
|
version: d0303fe809921458f417bcf828397a65db30a7e4
|
||||||
|
- name: github.com/mitchellh/reflectwalk
|
||||||
|
version: 63d60e9d0dbc60cf9164e6510889b0db6683d98c
|
||||||
- name: github.com/mvdan/xurls
|
- name: github.com/mvdan/xurls
|
||||||
version: db96455566f05ffe42bd6ac671f05eeb1152b45d
|
version: db96455566f05ffe42bd6ac671f05eeb1152b45d
|
||||||
- name: github.com/Nvveen/Gotty
|
- name: github.com/Nvveen/Gotty
|
||||||
|
@ -481,7 +485,7 @@ imports:
|
||||||
- name: github.com/urfave/negroni
|
- name: github.com/urfave/negroni
|
||||||
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
|
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
|
||||||
- name: github.com/vulcand/oxy
|
- name: github.com/vulcand/oxy
|
||||||
version: 6c94d2888dba2b1a15a89b8a2ca515fc85e07477
|
version: 648088ee0902cf8d8337826ae2a82444008720e2
|
||||||
repo: https://github.com/containous/oxy.git
|
repo: https://github.com/containous/oxy.git
|
||||||
vcs: git
|
vcs: git
|
||||||
subpackages:
|
subpackages:
|
||||||
|
|
|
@ -12,7 +12,7 @@ import:
|
||||||
- package: github.com/cenk/backoff
|
- package: github.com/cenk/backoff
|
||||||
- package: github.com/containous/flaeg
|
- package: github.com/containous/flaeg
|
||||||
- package: github.com/vulcand/oxy
|
- package: github.com/vulcand/oxy
|
||||||
version: 6c94d2888dba2b1a15a89b8a2ca515fc85e07477
|
version: 648088ee0902cf8d8337826ae2a82444008720e2
|
||||||
repo: https://github.com/containous/oxy.git
|
repo: https://github.com/containous/oxy.git
|
||||||
vcs: git
|
vcs: git
|
||||||
subpackages:
|
subpackages:
|
||||||
|
@ -201,6 +201,7 @@ import:
|
||||||
version: e039e20e500c2c025d9145be375e27cf42a94174
|
version: e039e20e500c2c025d9145be375e27cf42a94174
|
||||||
- package: github.com/armon/go-proxyproto
|
- package: github.com/armon/go-proxyproto
|
||||||
version: 48572f11356f1843b694f21a290d4f1006bc5e47
|
version: 48572f11356f1843b694f21a290d4f1006bc5e47
|
||||||
|
- package: github.com/mitchellh/copystructure
|
||||||
testImport:
|
testImport:
|
||||||
- package: github.com/stvp/go-udp-testing
|
- package: github.com/stvp/go-udp-testing
|
||||||
- package: github.com/docker/libcompose
|
- package: github.com/docker/libcompose
|
||||||
|
|
|
@ -138,7 +138,6 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
|
||||||
|
|
||||||
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
err = s.registerService("test", nginx.NetworkSettings.IPAddress, 80, []string{})
|
||||||
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
c.Assert(err, checker.IsNil, check.Commentf("Error registering service"))
|
||||||
defer s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
|
||||||
|
|
||||||
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8000/", nil)
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
@ -146,6 +145,11 @@ func (s *ConsulCatalogSuite) TestSingleService(c *check.C) {
|
||||||
|
|
||||||
err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusOK), try.HasBody())
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
s.deregisterService("test", nginx.NetworkSettings.IPAddress)
|
||||||
|
err = try.Request(req, 10*time.Second, try.StatusCodeIs(http.StatusNotFound), try.HasBody())
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C) {
|
func (s *ConsulCatalogSuite) TestExposedByDefaultFalseSingleService(c *check.C) {
|
||||||
|
|
|
@ -9,6 +9,8 @@ defaultEntryPoints = ["http"]
|
||||||
[entryPoints.http]
|
[entryPoints.http]
|
||||||
address = ":8000"
|
address = ":8000"
|
||||||
|
|
||||||
|
checkNewVersion = false
|
||||||
|
|
||||||
################################################################
|
################################################################
|
||||||
# Web configuration backend
|
# Web configuration backend
|
||||||
################################################################
|
################################################################
|
||||||
|
|
|
@ -9,6 +9,8 @@ defaultEntryPoints = ["http"]
|
||||||
[entryPoints.http]
|
[entryPoints.http]
|
||||||
address = ":8000"
|
address = ":8000"
|
||||||
|
|
||||||
|
checkNewVersion = false
|
||||||
|
|
||||||
################################################################
|
################################################################
|
||||||
# Web configuration backend
|
# Web configuration backend
|
||||||
################################################################
|
################################################################
|
||||||
|
|
|
@ -4,8 +4,10 @@
|
||||||
traefikLogsFile = "traefik.log"
|
traefikLogsFile = "traefik.log"
|
||||||
accessLogsFile = "access.log"
|
accessLogsFile = "access.log"
|
||||||
logLevel = "DEBUG"
|
logLevel = "DEBUG"
|
||||||
checkNewVersion = false
|
|
||||||
defaultEntryPoints = ["http"]
|
defaultEntryPoints = ["http"]
|
||||||
|
|
||||||
|
checkNewVersion = false
|
||||||
|
|
||||||
[entryPoints]
|
[entryPoints]
|
||||||
[entryPoints.http]
|
[entryPoints.http]
|
||||||
address = ":8000"
|
address = ":8000"
|
||||||
|
|
|
@ -21,4 +21,4 @@ logLevel = "DEBUG"
|
||||||
[frontends.frontend1]
|
[frontends.frontend1]
|
||||||
backend = "backend1"
|
backend = "backend1"
|
||||||
[frontends.frontend1.routes.test_1]
|
[frontends.frontend1.routes.test_1]
|
||||||
rule = "Path:/ws"
|
rule = "PathPrefix:/ws"
|
||||||
|
|
|
@ -3,6 +3,7 @@ package integration
|
||||||
import (
|
import (
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
|
"encoding/base64"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
@ -295,3 +296,148 @@ func (s *WebsocketSuite) TestSSLTermination(c *check.C) {
|
||||||
c.Assert(err, checker.IsNil)
|
c.Assert(err, checker.IsNil)
|
||||||
c.Assert(string(msg), checker.Equals, "OK")
|
c.Assert(string(msg), checker.Equals, "OK")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *WebsocketSuite) TestBasicAuth(c *check.C) {
|
||||||
|
var upgrader = gorillawebsocket.Upgrader{} // use default options
|
||||||
|
|
||||||
|
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
conn, err := upgrader.Upgrade(w, r, nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
user, password, _ := r.BasicAuth()
|
||||||
|
c.Assert(user, check.Equals, "traefiker")
|
||||||
|
c.Assert(password, check.Equals, "secret")
|
||||||
|
|
||||||
|
for {
|
||||||
|
mt, message, err := conn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
err = conn.WriteMessage(mt, message)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
|
||||||
|
WebsocketServer string
|
||||||
|
}{
|
||||||
|
WebsocketServer: srv.URL,
|
||||||
|
})
|
||||||
|
|
||||||
|
defer os.Remove(file)
|
||||||
|
cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
|
||||||
|
defer display(c)
|
||||||
|
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, check.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
config, err := websocket.NewConfig("ws://127.0.0.1:8000/ws", "ws://127.0.0.1:8000")
|
||||||
|
auth := "traefiker:secret"
|
||||||
|
config.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(auth)))
|
||||||
|
|
||||||
|
c.Assert(err, check.IsNil)
|
||||||
|
|
||||||
|
conn, err := net.DialTimeout("tcp", "127.0.0.1:8000", time.Second)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
client, err := websocket.NewClient(config, conn)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
n, err := client.Write([]byte("OK"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(n, checker.Equals, 2)
|
||||||
|
|
||||||
|
msg := make([]byte, 2)
|
||||||
|
n, err = client.Read(msg)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(n, checker.Equals, 2)
|
||||||
|
c.Assert(string(msg), checker.Equals, "OK")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *WebsocketSuite) TestSpecificResponseFromBackend(c *check.C) {
|
||||||
|
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(401)
|
||||||
|
}))
|
||||||
|
file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
|
||||||
|
WebsocketServer string
|
||||||
|
}{
|
||||||
|
WebsocketServer: srv.URL,
|
||||||
|
})
|
||||||
|
|
||||||
|
defer os.Remove(file)
|
||||||
|
cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
|
||||||
|
defer display(c)
|
||||||
|
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, check.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
_, resp, err := gorillawebsocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws", nil)
|
||||||
|
c.Assert(err, checker.NotNil)
|
||||||
|
c.Assert(resp.StatusCode, check.Equals, 401)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *WebsocketSuite) TestURLWithURLEncodedChar(c *check.C) {
|
||||||
|
var upgrader = gorillawebsocket.Upgrader{} // use default options
|
||||||
|
|
||||||
|
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
c.Assert(r.URL.Path, check.Equals, "/ws/http%3A%2F%2Ftest")
|
||||||
|
conn, err := upgrader.Upgrade(w, r, nil)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
for {
|
||||||
|
mt, message, err := conn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
err = conn.WriteMessage(mt, message)
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
|
||||||
|
file := s.adaptFile(c, "fixtures/websocket/config.toml", struct {
|
||||||
|
WebsocketServer string
|
||||||
|
}{
|
||||||
|
WebsocketServer: srv.URL,
|
||||||
|
})
|
||||||
|
|
||||||
|
defer os.Remove(file)
|
||||||
|
cmd, display := s.traefikCmd(withConfigFile(file), "--debug")
|
||||||
|
defer display(c)
|
||||||
|
|
||||||
|
err := cmd.Start()
|
||||||
|
c.Assert(err, check.IsNil)
|
||||||
|
defer cmd.Process.Kill()
|
||||||
|
|
||||||
|
// wait for traefik
|
||||||
|
err = try.GetRequest("http://127.0.0.1:8080/api/providers", 10*time.Second, try.BodyContains("127.0.0.1"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
conn, _, err := gorillawebsocket.DefaultDialer.Dial("ws://127.0.0.1:8000/ws/http%3A%2F%2Ftest", nil)
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
err = conn.WriteMessage(gorillawebsocket.TextMessage, []byte("OK"))
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
|
||||||
|
_, msg, err := conn.ReadMessage()
|
||||||
|
c.Assert(err, checker.IsNil)
|
||||||
|
c.Assert(string(msg), checker.Equals, "OK")
|
||||||
|
}
|
||||||
|
|
|
@ -18,7 +18,7 @@ type Authenticator struct {
|
||||||
users map[string]string
|
users map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewAuthenticator builds a new Autenticator given a config
|
// NewAuthenticator builds a new Authenticator given a config
|
||||||
func NewAuthenticator(authConfig *types.Auth) (*Authenticator, error) {
|
func NewAuthenticator(authConfig *types.Auth) (*Authenticator, error) {
|
||||||
if authConfig == nil {
|
if authConfig == nil {
|
||||||
return nil, fmt.Errorf("Error creating Authenticator: auth is nil")
|
return nil, fmt.Errorf("Error creating Authenticator: auth is nil")
|
||||||
|
|
|
@ -38,7 +38,7 @@ func (m *MetricsWrapper) ServeHTTP(rw http.ResponseWriter, r *http.Request, next
|
||||||
m.registry.ReqsCounter().With(reqLabels...).Add(1)
|
m.registry.ReqsCounter().With(reqLabels...).Add(1)
|
||||||
|
|
||||||
reqDurationLabels := []string{"service", m.serviceName, "code", strconv.Itoa(prw.statusCode)}
|
reqDurationLabels := []string{"service", m.serviceName, "code", strconv.Itoa(prw.statusCode)}
|
||||||
m.registry.ReqDurationHistogram().With(reqDurationLabels...).Observe(float64(time.Since(start).Seconds()))
|
m.registry.ReqDurationHistogram().With(reqDurationLabels...).Observe(time.Since(start).Seconds())
|
||||||
}
|
}
|
||||||
|
|
||||||
type retryMetrics interface {
|
type retryMetrics interface {
|
||||||
|
|
|
@ -15,7 +15,7 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
kv.Provider `mapstructure:",squash"`
|
kv.Provider `mapstructure:",squash" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the boltdb provider to Provide configurations to traefik
|
// Provide allows the boltdb provider to Provide configurations to traefik
|
||||||
|
|
|
@ -15,7 +15,7 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the p.
|
// Provider holds configurations of the p.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
kv.Provider `mapstructure:",squash"`
|
kv.Provider `mapstructure:",squash" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the consul provider to provide configurations to traefik
|
// Provide allows the consul provider to provide configurations to traefik
|
||||||
|
|
|
@ -28,12 +28,12 @@ var _ provider.Provider = (*CatalogProvider)(nil)
|
||||||
|
|
||||||
// CatalogProvider holds configurations of the Consul catalog provider.
|
// CatalogProvider holds configurations of the Consul catalog provider.
|
||||||
type CatalogProvider struct {
|
type CatalogProvider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Endpoint string `description:"Consul server endpoint"`
|
Endpoint string `description:"Consul server endpoint"`
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used"`
|
||||||
ExposedByDefault bool `description:"Expose Consul services by default"`
|
ExposedByDefault bool `description:"Expose Consul services by default" export:"true"`
|
||||||
Prefix string `description:"Prefix used for Consul catalog tags"`
|
Prefix string `description:"Prefix used for Consul catalog tags" export:"true"`
|
||||||
FrontEndRule string `description:"Frontend rule used for Consul services"`
|
FrontEndRule string `description:"Frontend rule used for Consul services" export:"true"`
|
||||||
client *api.Client
|
client *api.Client
|
||||||
frontEndRuleTemplate *template.Template
|
frontEndRuleTemplate *template.Template
|
||||||
}
|
}
|
||||||
|
@ -190,7 +190,6 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
|
||||||
catalog := p.client.Catalog()
|
catalog := p.client.Catalog()
|
||||||
|
|
||||||
safe.Go(func() {
|
safe.Go(func() {
|
||||||
current := make(map[string]Service)
|
|
||||||
// variable to hold previous state
|
// variable to hold previous state
|
||||||
var flashback map[string]Service
|
var flashback map[string]Service
|
||||||
|
|
||||||
|
@ -216,7 +215,7 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
|
||||||
options.WaitIndex = meta.LastIndex
|
options.WaitIndex = meta.LastIndex
|
||||||
|
|
||||||
if data != nil {
|
if data != nil {
|
||||||
|
current := make(map[string]Service)
|
||||||
for key, value := range data {
|
for key, value := range data {
|
||||||
nodes, _, err := catalog.Service(key, "", &api.QueryOptions{})
|
nodes, _, err := catalog.Service(key, "", &api.QueryOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -246,10 +245,7 @@ func (p *CatalogProvider) watchCatalogServices(stopCh <-chan struct{}, watchCh c
|
||||||
if len(removedServiceKeys) > 0 || len(removedServiceNodeKeys) > 0 || len(addedServiceKeys) > 0 || len(addedServiceNodeKeys) > 0 {
|
if len(removedServiceKeys) > 0 || len(removedServiceNodeKeys) > 0 || len(addedServiceKeys) > 0 || len(addedServiceNodeKeys) > 0 {
|
||||||
log.WithField("MissingServices", removedServiceKeys).WithField("DiscoveredServices", addedServiceKeys).Debug("Catalog Services change detected.")
|
log.WithField("MissingServices", removedServiceKeys).WithField("DiscoveredServices", addedServiceKeys).Debug("Catalog Services change detected.")
|
||||||
watchCh <- data
|
watchCh <- data
|
||||||
flashback = make(map[string]Service, len(current))
|
flashback = current
|
||||||
for key, value := range current {
|
|
||||||
flashback[key] = value
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -46,13 +46,13 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Endpoint string `description:"Docker server endpoint. Can be a tcp or a unix socket endpoint"`
|
Endpoint string `description:"Docker server endpoint. Can be a tcp or a unix socket endpoint"`
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used"`
|
||||||
TLS *types.ClientTLS `description:"Enable Docker TLS support"`
|
TLS *types.ClientTLS `description:"Enable Docker TLS support" export:"true"`
|
||||||
ExposedByDefault bool `description:"Expose containers by default"`
|
ExposedByDefault bool `description:"Expose containers by default" export:"true"`
|
||||||
UseBindPortIP bool `description:"Use the ip address from the bound port, rather than from the inner network"`
|
UseBindPortIP bool `description:"Use the ip address from the bound port, rather than from the inner network" export:"true"`
|
||||||
SwarmMode bool `description:"Use Docker on Swarm Mode"`
|
SwarmMode bool `description:"Use Docker on Swarm Mode" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// dockerData holds the need data to the Provider p
|
// dockerData holds the need data to the Provider p
|
||||||
|
|
|
@ -24,14 +24,13 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configuration for provider.
|
// Provider holds configuration for provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
|
AccessKeyID string `description:"The AWS credentials access key to use for making requests"`
|
||||||
AccessKeyID string `description:"The AWS credentials access key to use for making requests"`
|
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||||
RefreshSeconds int `description:"Polling interval (in seconds)"`
|
Region string `description:"The AWS region to use for requests" export:"true"`
|
||||||
Region string `description:"The AWS region to use for requests"`
|
SecretAccessKey string `description:"The AWS credentials secret key to use for making requests"`
|
||||||
SecretAccessKey string `description:"The AWS credentals secret key to use for making requests"`
|
TableName string `description:"The AWS dynamodb table that stores configuration for traefik" export:"true"`
|
||||||
TableName string `description:"The AWS dynamodb table that stores configuration for traefik"`
|
Endpoint string `description:"The endpoint of a dynamodb. Used for testing with a local dynamodb"`
|
||||||
Endpoint string `description:"The endpoint of a dynamodb. Used for testing with a local dynamodb"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type dynamoClient struct {
|
type dynamoClient struct {
|
||||||
|
|
|
@ -29,17 +29,17 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
|
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used"`
|
||||||
ExposedByDefault bool `description:"Expose containers by default"`
|
ExposedByDefault bool `description:"Expose containers by default" export:"true"`
|
||||||
RefreshSeconds int `description:"Polling interval (in seconds)"`
|
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||||
|
|
||||||
// Provider lookup parameters
|
// Provider lookup parameters
|
||||||
Clusters Clusters `description:"ECS Clusters name"`
|
Clusters Clusters `description:"ECS Clusters name"`
|
||||||
Cluster string `description:"deprecated - ECS Cluster name"` // deprecated
|
Cluster string `description:"deprecated - ECS Cluster name"` // deprecated
|
||||||
AutoDiscoverClusters bool `description:"Auto discover cluster"`
|
AutoDiscoverClusters bool `description:"Auto discover cluster" export:"true"`
|
||||||
Region string `description:"The AWS region to use for requests"`
|
Region string `description:"The AWS region to use for requests" export:"true"`
|
||||||
AccessKeyID string `description:"The AWS credentials access key to use for making requests"`
|
AccessKeyID string `description:"The AWS credentials access key to use for making requests"`
|
||||||
SecretAccessKey string `description:"The AWS credentials access key to use for making requests"`
|
SecretAccessKey string `description:"The AWS credentials access key to use for making requests"`
|
||||||
}
|
}
|
||||||
|
@ -214,7 +214,6 @@ func (p *Provider) loadECSConfig(ctx context.Context, client *awsClient) (*types
|
||||||
// Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels)
|
// Find all running Provider tasks in a cluster, also collect the task definitions (for docker labels)
|
||||||
// and the EC2 instance data
|
// and the EC2 instance data
|
||||||
func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) {
|
func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsInstance, error) {
|
||||||
var taskArns []*string
|
|
||||||
var instances []ecsInstance
|
var instances []ecsInstance
|
||||||
var clustersArn []*string
|
var clustersArn []*string
|
||||||
var clusters Clusters
|
var clusters Clusters
|
||||||
|
@ -255,6 +254,8 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||||
DesiredStatus: aws.String(ecs.DesiredStatusRunning),
|
DesiredStatus: aws.String(ecs.DesiredStatusRunning),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
var taskArns []*string
|
||||||
|
|
||||||
for ; req != nil; req = req.NextPage() {
|
for ; req != nil; req = req.NextPage() {
|
||||||
if err := wrapAws(ctx, req); err != nil {
|
if err := wrapAws(ctx, req); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -263,12 +264,10 @@ func (p *Provider) listInstances(ctx context.Context, client *awsClient) ([]ecsI
|
||||||
taskArns = append(taskArns, req.Data.(*ecs.ListTasksOutput).TaskArns...)
|
taskArns = append(taskArns, req.Data.(*ecs.ListTasksOutput).TaskArns...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Early return: if we can't list tasks we have nothing to
|
// Skip to the next cluster if there are no tasks found on
|
||||||
// describe below - likely empty cluster/permissions are bad. This
|
// this cluster.
|
||||||
// stops the AWS API from returning a 401 when you DescribeTasks
|
|
||||||
// with no input.
|
|
||||||
if len(taskArns) == 0 {
|
if len(taskArns) == 0 {
|
||||||
return []ecsInstance{}, nil
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkedTaskArns := p.chunkedTaskArns(taskArns)
|
chunkedTaskArns := p.chunkedTaskArns(taskArns)
|
||||||
|
|
|
@ -15,7 +15,7 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
kv.Provider `mapstructure:",squash"`
|
kv.Provider `mapstructure:",squash" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the etcd provider to Provide configurations to traefik
|
// Provide allows the etcd provider to Provide configurations to traefik
|
||||||
|
|
|
@ -18,9 +18,9 @@ import (
|
||||||
|
|
||||||
// Provider holds configuration of the Provider provider.
|
// Provider holds configuration of the Provider provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Endpoint string `description:"Eureka server endpoint"`
|
Endpoint string `description:"Eureka server endpoint"`
|
||||||
Delay string `description:"Override default configuration time between refresh"`
|
Delay string `description:"Override default configuration time between refresh" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the eureka provider to provide configurations to traefik
|
// Provide allows the eureka provider to provide configurations to traefik
|
||||||
|
|
|
@ -18,8 +18,8 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Directory string `description:"Load configuration from one or more .toml files in a directory"`
|
Directory string `description:"Load configuration from one or more .toml files in a directory" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the file provider to provide configurations to traefik
|
// Provide allows the file provider to provide configurations to traefik
|
||||||
|
|
|
@ -42,13 +42,13 @@ const traefikDefaultRealm = "traefik"
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)"`
|
Endpoint string `description:"Kubernetes server endpoint (required for external cluster client)"`
|
||||||
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)"`
|
Token string `description:"Kubernetes bearer token (not needed for in-cluster client)"`
|
||||||
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)"`
|
CertAuthFilePath string `description:"Kubernetes certificate authority file path (not needed for in-cluster client)"`
|
||||||
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers"`
|
DisablePassHostHeaders bool `description:"Kubernetes disable PassHost Headers" export:"true"`
|
||||||
Namespaces Namespaces `description:"Kubernetes namespaces"`
|
Namespaces Namespaces `description:"Kubernetes namespaces" export:"true"`
|
||||||
LabelSelector string `description:"Kubernetes api label selector to use"`
|
LabelSelector string `description:"Kubernetes api label selector to use" export:"true"`
|
||||||
lastConfiguration safe.Safe
|
lastConfiguration safe.Safe
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -20,10 +20,10 @@ import (
|
||||||
|
|
||||||
// Provider holds common configurations of key-value providers.
|
// Provider holds common configurations of key-value providers.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
Endpoint string `description:"Comma separated server endpoints"`
|
Endpoint string `description:"Comma separated server endpoints"`
|
||||||
Prefix string `description:"Prefix used for KV store"`
|
Prefix string `description:"Prefix used for KV store" export:"true"`
|
||||||
TLS *types.ClientTLS `description:"Enable TLS support"`
|
TLS *types.ClientTLS `description:"Enable TLS support" export:"true"`
|
||||||
Username string `description:"KV Username"`
|
Username string `description:"KV Username"`
|
||||||
Password string `description:"KV Password"`
|
Password string `description:"KV Password"`
|
||||||
storeType store.Backend
|
storeType store.Backend
|
||||||
|
|
|
@ -53,18 +53,18 @@ var servicesPropertiesRegexp = regexp.MustCompile(`^traefik\.(?P<service_name>.+
|
||||||
// Provider holds configuration of the provider.
|
// Provider holds configuration of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider
|
provider.BaseProvider
|
||||||
Endpoint string `description:"Marathon server endpoint. You can also specify multiple endpoint for Marathon"`
|
Endpoint string `description:"Marathon server endpoint. You can also specify multiple endpoint for Marathon" export:"true"`
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used" export:"true"`
|
||||||
ExposedByDefault bool `description:"Expose Marathon apps by default"`
|
ExposedByDefault bool `description:"Expose Marathon apps by default" export:"true"`
|
||||||
GroupsAsSubDomains bool `description:"Convert Marathon groups to subdomains"`
|
GroupsAsSubDomains bool `description:"Convert Marathon groups to subdomains" export:"true"`
|
||||||
DCOSToken string `description:"DCOSToken for DCOS environment, This will override the Authorization header"`
|
DCOSToken string `description:"DCOSToken for DCOS environment, This will override the Authorization header" export:"true"`
|
||||||
MarathonLBCompatibility bool `description:"Add compatibility with marathon-lb labels"`
|
MarathonLBCompatibility bool `description:"Add compatibility with marathon-lb labels" export:"true"`
|
||||||
TLS *types.ClientTLS `description:"Enable Docker TLS support"`
|
TLS *types.ClientTLS `description:"Enable Docker TLS support" export:"true"`
|
||||||
DialerTimeout flaeg.Duration `description:"Set a non-default connection timeout for Marathon"`
|
DialerTimeout flaeg.Duration `description:"Set a non-default connection timeout for Marathon" export:"true"`
|
||||||
KeepAlive flaeg.Duration `description:"Set a non-default TCP Keep Alive time in seconds"`
|
KeepAlive flaeg.Duration `description:"Set a non-default TCP Keep Alive time in seconds" export:"true"`
|
||||||
ForceTaskHostname bool `description:"Force to use the task's hostname."`
|
ForceTaskHostname bool `description:"Force to use the task's hostname." export:"true"`
|
||||||
Basic *Basic `description:"Enable basic authentication"`
|
Basic *Basic `description:"Enable basic authentication" export:"true"`
|
||||||
RespectReadinessChecks bool `description:"Filter out tasks with non-successful readiness checks during deployments"`
|
RespectReadinessChecks bool `description:"Filter out tasks with non-successful readiness checks during deployments" export:"true"`
|
||||||
readyChecker *readinessChecker
|
readyChecker *readinessChecker
|
||||||
marathonClient marathon.Marathon
|
marathonClient marathon.Marathon
|
||||||
}
|
}
|
||||||
|
|
|
@ -32,12 +32,12 @@ type Provider struct {
|
||||||
provider.BaseProvider
|
provider.BaseProvider
|
||||||
Endpoint string `description:"Mesos server endpoint. You can also specify multiple endpoint for Mesos"`
|
Endpoint string `description:"Mesos server endpoint. You can also specify multiple endpoint for Mesos"`
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used"`
|
||||||
ExposedByDefault bool `description:"Expose Mesos apps by default"`
|
ExposedByDefault bool `description:"Expose Mesos apps by default" export:"true"`
|
||||||
GroupsAsSubDomains bool `description:"Convert Mesos groups to subdomains"`
|
GroupsAsSubDomains bool `description:"Convert Mesos groups to subdomains" export:"true"`
|
||||||
ZkDetectionTimeout int `description:"Zookeeper timeout (in seconds)"`
|
ZkDetectionTimeout int `description:"Zookeeper timeout (in seconds)" export:"true"`
|
||||||
RefreshSeconds int `description:"Polling interval (in seconds)"`
|
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||||
IPSources string `description:"IPSources (e.g. host, docker, mesos, rkt)"` // e.g. "host", "docker", "mesos", "rkt"
|
IPSources string `description:"IPSources (e.g. host, docker, mesos, rkt)" export:"true"`
|
||||||
StateTimeoutSecond int `description:"HTTP Timeout (in seconds)"`
|
StateTimeoutSecond int `description:"HTTP Timeout (in seconds)" export:"true"`
|
||||||
Masters []string
|
Masters []string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -24,11 +24,11 @@ type Provider interface {
|
||||||
|
|
||||||
// BaseProvider should be inherited by providers
|
// BaseProvider should be inherited by providers
|
||||||
type BaseProvider struct {
|
type BaseProvider struct {
|
||||||
Watch bool `description:"Watch provider"`
|
Watch bool `description:"Watch provider" export:"true"`
|
||||||
Filename string `description:"Override default configuration template. For advanced users :)"`
|
Filename string `description:"Override default configuration template. For advanced users :)" export:"true"`
|
||||||
Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags."`
|
Constraints types.Constraints `description:"Filter services by constraint, matching with Traefik tags." export:"true"`
|
||||||
Trace bool `description:"Display additional provider logs (if available)."`
|
Trace bool `description:"Display additional provider logs (if available)." export:"true"`
|
||||||
DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template."`
|
DebugLogGeneratedTemplate bool `description:"Enable debug logging of generated configuration template." export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// MatchConstraints must match with EVERY single contraint
|
// MatchConstraints must match with EVERY single contraint
|
||||||
|
|
|
@ -18,14 +18,14 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
provider.BaseProvider `mapstructure:",squash"`
|
provider.BaseProvider `mapstructure:",squash" export:"true"`
|
||||||
APIConfiguration `mapstructure:",squash"` // Provide backwards compatibility
|
APIConfiguration `mapstructure:",squash" export:"true"` // Provide backwards compatibility
|
||||||
API *APIConfiguration `description:"Enable the Rancher API provider"`
|
API *APIConfiguration `description:"Enable the Rancher API provider" export:"true"`
|
||||||
Metadata *MetadataConfiguration `description:"Enable the Rancher metadata service provider"`
|
Metadata *MetadataConfiguration `description:"Enable the Rancher metadata service provider" export:"true"`
|
||||||
Domain string `description:"Default domain used"`
|
Domain string `description:"Default domain used"`
|
||||||
RefreshSeconds int `description:"Polling interval (in seconds)"`
|
RefreshSeconds int `description:"Polling interval (in seconds)" export:"true"`
|
||||||
ExposedByDefault bool `description:"Expose services by default"`
|
ExposedByDefault bool `description:"Expose services by default" export:"true"`
|
||||||
EnableServiceHealthFilter bool `description:"Filter services with unhealthy states and inactive states"`
|
EnableServiceHealthFilter bool `description:"Filter services with unhealthy states and inactive states" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
type rancherData struct {
|
type rancherData struct {
|
||||||
|
|
|
@ -25,15 +25,15 @@ import (
|
||||||
|
|
||||||
// Provider is a provider.Provider implementation that provides the UI
|
// Provider is a provider.Provider implementation that provides the UI
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
Address string `description:"Web administration port"`
|
Address string `description:"Web administration port" export:"true"`
|
||||||
CertFile string `description:"SSL certificate"`
|
CertFile string `description:"SSL certificate" export:"true"`
|
||||||
KeyFile string `description:"SSL certificate"`
|
KeyFile string `description:"SSL certificate" export:"true"`
|
||||||
ReadOnly bool `description:"Enable read only API"`
|
ReadOnly bool `description:"Enable read only API" export:"true"`
|
||||||
Statistics *types.Statistics `description:"Enable more detailed statistics"`
|
Statistics *types.Statistics `description:"Enable more detailed statistics" export:"true"`
|
||||||
Metrics *types.Metrics `description:"Enable a metrics exporter"`
|
Metrics *types.Metrics `description:"Enable a metrics exporter" export:"true"`
|
||||||
Path string `description:"Root path for dashboard and API"`
|
Path string `description:"Root path for dashboard and API"`
|
||||||
Auth *types.Auth
|
Auth *types.Auth `export:"true"`
|
||||||
Debug bool
|
Debug bool `export:"true"`
|
||||||
CurrentConfigurations *safe.Safe
|
CurrentConfigurations *safe.Safe
|
||||||
Stats *thoas_stats.Stats
|
Stats *thoas_stats.Stats
|
||||||
StatsRecorder *middlewares.StatsRecorder
|
StatsRecorder *middlewares.StatsRecorder
|
||||||
|
|
|
@ -15,7 +15,7 @@ var _ provider.Provider = (*Provider)(nil)
|
||||||
|
|
||||||
// Provider holds configurations of the provider.
|
// Provider holds configurations of the provider.
|
||||||
type Provider struct {
|
type Provider struct {
|
||||||
kv.Provider `mapstructure:",squash"`
|
kv.Provider `mapstructure:",squash" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Provide allows the zk provider to Provide configurations to traefik
|
// Provide allows the zk provider to Provide configurations to traefik
|
||||||
|
|
14
server/uuid/uuid.go
Normal file
14
server/uuid/uuid.go
Normal file
|
@ -0,0 +1,14 @@
|
||||||
|
package uuid
|
||||||
|
|
||||||
|
import guuid "github.com/satori/go.uuid"
|
||||||
|
|
||||||
|
var uuid string
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
uuid = guuid.NewV4().String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the instance UUID
|
||||||
|
func Get() string {
|
||||||
|
return uuid
|
||||||
|
}
|
|
@ -192,13 +192,13 @@ type ConfigMessage struct {
|
||||||
Configuration *Configuration
|
Configuration *Configuration
|
||||||
}
|
}
|
||||||
|
|
||||||
// Constraint hold a parsed constraint expresssion
|
// Constraint hold a parsed constraint expression
|
||||||
type Constraint struct {
|
type Constraint struct {
|
||||||
Key string
|
Key string `export:"true"`
|
||||||
// MustMatch is true if operator is "==" or false if operator is "!="
|
// MustMatch is true if operator is "==" or false if operator is "!="
|
||||||
MustMatch bool
|
MustMatch bool `export:"true"`
|
||||||
// TODO: support regex
|
// TODO: support regex
|
||||||
Regex string
|
Regex string `export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConstraint receive a string and return a *Constraint, after checking syntax and parsing the constraint expression
|
// NewConstraint receive a string and return a *Constraint, after checking syntax and parsing the constraint expression
|
||||||
|
@ -213,14 +213,14 @@ func NewConstraint(exp string) (*Constraint, error) {
|
||||||
sep = "!="
|
sep = "!="
|
||||||
constraint.MustMatch = false
|
constraint.MustMatch = false
|
||||||
} else {
|
} else {
|
||||||
return nil, errors.New("Constraint expression missing valid operator: '==' or '!='")
|
return nil, errors.New("constraint expression missing valid operator: '==' or '!='")
|
||||||
}
|
}
|
||||||
|
|
||||||
kv := strings.SplitN(exp, sep, 2)
|
kv := strings.SplitN(exp, sep, 2)
|
||||||
if len(kv) == 2 {
|
if len(kv) == 2 {
|
||||||
// At the moment, it only supports tags
|
// At the moment, it only supports tags
|
||||||
if kv[0] != "tag" {
|
if kv[0] != "tag" {
|
||||||
return nil, errors.New("Constraint must be tag-based. Syntax: tag==us-*")
|
return nil, errors.New("constraint must be tag-based. Syntax: tag==us-*")
|
||||||
}
|
}
|
||||||
|
|
||||||
constraint.Key = kv[0]
|
constraint.Key = kv[0]
|
||||||
|
@ -228,7 +228,7 @@ func NewConstraint(exp string) (*Constraint, error) {
|
||||||
return constraint, nil
|
return constraint, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, errors.New("Incorrect constraint expression: " + exp)
|
return nil, fmt.Errorf("incorrect constraint expression: %s", exp)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Constraint) String() string {
|
func (c *Constraint) String() string {
|
||||||
|
@ -273,7 +273,7 @@ func (c *Constraint) MatchConstraintWithAtLeastOneTag(tags []string) bool {
|
||||||
func (cs *Constraints) Set(str string) error {
|
func (cs *Constraints) Set(str string) error {
|
||||||
exps := strings.Split(str, ",")
|
exps := strings.Split(str, ",")
|
||||||
if len(exps) == 0 {
|
if len(exps) == 0 {
|
||||||
return errors.New("Bad Constraint format: " + str)
|
return fmt.Errorf("bad Constraint format: %s", str)
|
||||||
}
|
}
|
||||||
for _, exp := range exps {
|
for _, exp := range exps {
|
||||||
constraint, err := NewConstraint(exp)
|
constraint, err := NewConstraint(exp)
|
||||||
|
@ -307,21 +307,22 @@ func (cs *Constraints) Type() string {
|
||||||
// Store holds KV store cluster config
|
// Store holds KV store cluster config
|
||||||
type Store struct {
|
type Store struct {
|
||||||
store.Store
|
store.Store
|
||||||
Prefix string // like this "prefix" (without the /)
|
// like this "prefix" (without the /)
|
||||||
|
Prefix string `export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cluster holds cluster config
|
// Cluster holds cluster config
|
||||||
type Cluster struct {
|
type Cluster struct {
|
||||||
Node string `description:"Node name"`
|
Node string `description:"Node name" export:"true"`
|
||||||
Store *Store
|
Store *Store `export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Auth holds authentication configuration (BASIC, DIGEST, users)
|
// Auth holds authentication configuration (BASIC, DIGEST, users)
|
||||||
type Auth struct {
|
type Auth struct {
|
||||||
Basic *Basic
|
Basic *Basic `export:"true"`
|
||||||
Digest *Digest
|
Digest *Digest `export:"true"`
|
||||||
Forward *Forward
|
Forward *Forward `export:"true"`
|
||||||
HeaderField string
|
HeaderField string `export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Users authentication users
|
// Users authentication users
|
||||||
|
@ -342,8 +343,8 @@ type Digest struct {
|
||||||
// Forward authentication
|
// Forward authentication
|
||||||
type Forward struct {
|
type Forward struct {
|
||||||
Address string `description:"Authentication server address"`
|
Address string `description:"Authentication server address"`
|
||||||
TLS *ClientTLS `description:"Enable TLS support"`
|
TLS *ClientTLS `description:"Enable TLS support" export:"true"`
|
||||||
TrustForwardHeader bool `description:"Trust X-Forwarded-* headers"`
|
TrustForwardHeader bool `description:"Trust X-Forwarded-* headers" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// CanonicalDomain returns a lower case domain with trim space
|
// CanonicalDomain returns a lower case domain with trim space
|
||||||
|
@ -353,31 +354,31 @@ func CanonicalDomain(domain string) string {
|
||||||
|
|
||||||
// Statistics provides options for monitoring request and response stats
|
// Statistics provides options for monitoring request and response stats
|
||||||
type Statistics struct {
|
type Statistics struct {
|
||||||
RecentErrors int `description:"Number of recent errors logged"`
|
RecentErrors int `description:"Number of recent errors logged" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems
|
// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems
|
||||||
type Metrics struct {
|
type Metrics struct {
|
||||||
Prometheus *Prometheus `description:"Prometheus metrics exporter type"`
|
Prometheus *Prometheus `description:"Prometheus metrics exporter type" export:"true"`
|
||||||
Datadog *Datadog `description:"DataDog metrics exporter type"`
|
Datadog *Datadog `description:"DataDog metrics exporter type" export:"true"`
|
||||||
StatsD *Statsd `description:"StatsD metrics exporter type"`
|
StatsD *Statsd `description:"StatsD metrics exporter type" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prometheus can contain specific configuration used by the Prometheus Metrics exporter
|
// Prometheus can contain specific configuration used by the Prometheus Metrics exporter
|
||||||
type Prometheus struct {
|
type Prometheus struct {
|
||||||
Buckets Buckets `description:"Buckets for latency metrics"`
|
Buckets Buckets `description:"Buckets for latency metrics" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Datadog contains address and metrics pushing interval configuration
|
// Datadog contains address and metrics pushing interval configuration
|
||||||
type Datadog struct {
|
type Datadog struct {
|
||||||
Address string `description:"DataDog's address"`
|
Address string `description:"DataDog's address"`
|
||||||
PushInterval string `description:"DataDog push interval"`
|
PushInterval string `description:"DataDog push interval" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Statsd contains address and metrics pushing interval configuration
|
// Statsd contains address and metrics pushing interval configuration
|
||||||
type Statsd struct {
|
type Statsd struct {
|
||||||
Address string `description:"StatsD address"`
|
Address string `description:"StatsD address"`
|
||||||
PushInterval string `description:"DataDog push interval"`
|
PushInterval string `description:"DataDog push interval" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Buckets holds Prometheus Buckets
|
// Buckets holds Prometheus Buckets
|
||||||
|
@ -420,8 +421,8 @@ type TraefikLog struct {
|
||||||
|
|
||||||
// AccessLog holds the configuration settings for the access logger (middlewares/accesslog).
|
// AccessLog holds the configuration settings for the access logger (middlewares/accesslog).
|
||||||
type AccessLog struct {
|
type AccessLog struct {
|
||||||
FilePath string `json:"file,omitempty" description:"Access log file path. Stdout is used when omitted or empty"`
|
FilePath string `json:"file,omitempty" description:"Access log file path. Stdout is used when omitted or empty" export:"true"`
|
||||||
Format string `json:"format,omitempty" description:"Access log format: json | common"`
|
Format string `json:"format,omitempty" description:"Access log format: json | common" export:"true"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClientTLS holds TLS specific configurations as client
|
// ClientTLS holds TLS specific configurations as client
|
||||||
|
|
21
vendor/github.com/mitchellh/copystructure/LICENSE
generated
vendored
Normal file
21
vendor/github.com/mitchellh/copystructure/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2014 Mitchell Hashimoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
15
vendor/github.com/mitchellh/copystructure/copier_time.go
generated
vendored
Normal file
15
vendor/github.com/mitchellh/copystructure/copier_time.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
package copystructure
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
Copiers[reflect.TypeOf(time.Time{})] = timeCopier
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeCopier(v interface{}) (interface{}, error) {
|
||||||
|
// Just... copy it.
|
||||||
|
return v.(time.Time), nil
|
||||||
|
}
|
548
vendor/github.com/mitchellh/copystructure/copystructure.go
generated
vendored
Normal file
548
vendor/github.com/mitchellh/copystructure/copystructure.go
generated
vendored
Normal file
|
@ -0,0 +1,548 @@
|
||||||
|
package copystructure
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/mitchellh/reflectwalk"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Copy returns a deep copy of v.
|
||||||
|
func Copy(v interface{}) (interface{}, error) {
|
||||||
|
return Config{}.Copy(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CopierFunc is a function that knows how to deep copy a specific type.
|
||||||
|
// Register these globally with the Copiers variable.
|
||||||
|
type CopierFunc func(interface{}) (interface{}, error)
|
||||||
|
|
||||||
|
// Copiers is a map of types that behave specially when they are copied.
|
||||||
|
// If a type is found in this map while deep copying, this function
|
||||||
|
// will be called to copy it instead of attempting to copy all fields.
|
||||||
|
//
|
||||||
|
// The key should be the type, obtained using: reflect.TypeOf(value with type).
|
||||||
|
//
|
||||||
|
// It is unsafe to write to this map after Copies have started. If you
|
||||||
|
// are writing to this map while also copying, wrap all modifications to
|
||||||
|
// this map as well as to Copy in a mutex.
|
||||||
|
var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
|
||||||
|
|
||||||
|
// Must is a helper that wraps a call to a function returning
|
||||||
|
// (interface{}, error) and panics if the error is non-nil. It is intended
|
||||||
|
// for use in variable initializations and should only be used when a copy
|
||||||
|
// error should be a crashing case.
|
||||||
|
func Must(v interface{}, err error) interface{} {
|
||||||
|
if err != nil {
|
||||||
|
panic("copy error: " + err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
// Lock any types that are a sync.Locker and are not a mutex while copying.
|
||||||
|
// If there is an RLocker method, use that to get the sync.Locker.
|
||||||
|
Lock bool
|
||||||
|
|
||||||
|
// Copiers is a map of types associated with a CopierFunc. Use the global
|
||||||
|
// Copiers map if this is nil.
|
||||||
|
Copiers map[reflect.Type]CopierFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c Config) Copy(v interface{}) (interface{}, error) {
|
||||||
|
if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
|
||||||
|
return nil, errPointerRequired
|
||||||
|
}
|
||||||
|
|
||||||
|
w := new(walker)
|
||||||
|
if c.Lock {
|
||||||
|
w.useLocks = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.Copiers == nil {
|
||||||
|
c.Copiers = Copiers
|
||||||
|
}
|
||||||
|
|
||||||
|
err := reflectwalk.Walk(v, w)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the result. If the result is nil, then we want to turn it
|
||||||
|
// into a typed nil if we can.
|
||||||
|
result := w.Result
|
||||||
|
if result == nil {
|
||||||
|
val := reflect.ValueOf(v)
|
||||||
|
result = reflect.Indirect(reflect.New(val.Type())).Interface()
|
||||||
|
}
|
||||||
|
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the key used to index interfaces types we've seen. Store the number
|
||||||
|
// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
|
||||||
|
// easy to calculate, easy to match a key with our current depth, and we don't
|
||||||
|
// need to deal with initializing and cleaning up nested maps or slices.
|
||||||
|
func ifaceKey(pointers, depth int) uint64 {
|
||||||
|
return uint64(pointers)<<32 | uint64(depth)
|
||||||
|
}
|
||||||
|
|
||||||
|
type walker struct {
|
||||||
|
Result interface{}
|
||||||
|
|
||||||
|
depth int
|
||||||
|
ignoreDepth int
|
||||||
|
vals []reflect.Value
|
||||||
|
cs []reflect.Value
|
||||||
|
|
||||||
|
// This stores the number of pointers we've walked over, indexed by depth.
|
||||||
|
ps []int
|
||||||
|
|
||||||
|
// If an interface is indirected by a pointer, we need to know the type of
|
||||||
|
// interface to create when creating the new value. Store the interface
|
||||||
|
// types here, indexed by both the walk depth and the number of pointers
|
||||||
|
// already seen at that depth. Use ifaceKey to calculate the proper uint64
|
||||||
|
// value.
|
||||||
|
ifaceTypes map[uint64]reflect.Type
|
||||||
|
|
||||||
|
// any locks we've taken, indexed by depth
|
||||||
|
locks []sync.Locker
|
||||||
|
// take locks while walking the structure
|
||||||
|
useLocks bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *walker) Enter(l reflectwalk.Location) error {
|
||||||
|
w.depth++
|
||||||
|
|
||||||
|
// ensure we have enough elements to index via w.depth
|
||||||
|
for w.depth >= len(w.locks) {
|
||||||
|
w.locks = append(w.locks, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
for len(w.ps) < w.depth+1 {
|
||||||
|
w.ps = append(w.ps, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exit is called by reflectwalk when leaving a structural location. It
// releases any lock taken at this depth, clears per-depth pointer and
// interface bookkeeping, and — depending on the location — pops the value
// stack and writes the copied value into its parent container.
func (w *walker) Exit(l reflectwalk.Location) error {
	// Release (via defer) whatever lock was acquired for this depth.
	locker := w.locks[w.depth]
	w.locks[w.depth] = nil
	if locker != nil {
		defer locker.Unlock()
	}

	// clear out pointers and interfaces as we exit the stack
	w.ps[w.depth] = 0

	// ifaceKey packs (pointer count << 32 | depth); the low 32 bits hold the
	// depth, so mask and compare to drop every entry recorded at this depth.
	for k := range w.ifaceTypes {
		mask := uint64(^uint32(0))
		if k&mask == uint64(w.depth) {
			delete(w.ifaceTypes, k)
		}
	}

	w.depth--
	if w.ignoreDepth > w.depth {
		w.ignoreDepth = 0
	}

	if w.ignoring() {
		return nil
	}

	switch l {
	case reflectwalk.Array:
		fallthrough
	case reflectwalk.Map:
		fallthrough
	case reflectwalk.Slice:
		w.replacePointerMaybe()

		// Pop map off our container
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.MapValue:
		// Pop off the key and value
		mv := w.valPop()
		mk := w.valPop()
		m := w.cs[len(w.cs)-1]

		// If mv is the zero value, SetMapIndex deletes the key from the map,
		// or in this case never adds it. We need to create a properly typed
		// zero value so that this key can be set.
		if !mv.IsValid() {
			mv = reflect.Zero(m.Elem().Type().Elem())
		}
		m.Elem().SetMapIndex(mk, mv)
	case reflectwalk.ArrayElem:
		// Pop off the value and the index and set it on the array
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			a := w.cs[len(w.cs)-1]
			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
			if ae.CanSet() {
				ae.Set(v)
			}
		}
	case reflectwalk.SliceElem:
		// Pop off the value and the index and set it on the slice
		v := w.valPop()
		i := w.valPop().Interface().(int)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			se := s.Elem().Index(i)
			if se.CanSet() {
				se.Set(v)
			}
		}
	case reflectwalk.Struct:
		w.replacePointerMaybe()

		// Remove the struct from the container stack
		w.cs = w.cs[:len(w.cs)-1]
	case reflectwalk.StructField:
		// Pop off the value and the field
		v := w.valPop()
		f := w.valPop().Interface().(reflect.StructField)
		if v.IsValid() {
			s := w.cs[len(w.cs)-1]
			sf := reflect.Indirect(s).FieldByName(f.Name)

			if sf.CanSet() {
				sf.Set(v)
			}
		}
	case reflectwalk.WalkLoc:
		// Clear out the slices for GC
		w.cs = nil
		w.vals = nil
	}

	return nil
}
|
||||||
|
|
||||||
|
// Map is called when the walk encounters a map. It pushes a freshly made
// (pointer-wrapped) map of the same type onto both the container stack and
// the value stack; entries are copied in later by Exit(MapValue).
func (w *walker) Map(m reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(m)

	// Create the map. If the map itself is nil, then just make a nil map
	var newMap reflect.Value
	if m.IsNil() {
		newMap = reflect.New(m.Type())
	} else {
		newMap = wrapPtr(reflect.MakeMap(m.Type()))
	}

	w.cs = append(w.cs, newMap)
	w.valPush(newMap)
	return nil
}
|
||||||
|
|
||||||
|
// MapElem is intentionally a no-op: the copied key/value pair is assembled
// in Exit(MapValue) after both have been walked onto the value stack.
func (w *walker) MapElem(m, k, v reflect.Value) error {
	return nil
}
|
||||||
|
|
||||||
|
// PointerEnter increments the pointer count for the current depth when the
// walker descends through a pointer (v reports whether the value walked
// into was a pointer).
func (w *walker) PointerEnter(v bool) error {
	if v {
		w.ps[w.depth]++
	}
	return nil
}
|
||||||
|
|
||||||
|
// PointerExit undoes the bookkeeping of PointerEnter when the walker climbs
// back out of a pointer.
func (w *walker) PointerExit(v bool) error {
	if v {
		w.ps[w.depth]--
	}
	return nil
}
|
||||||
|
|
||||||
|
// Interface records the dynamic type seen at the current (pointer count,
// depth) position so replacePointerMaybe can later re-box the copied value
// as the same interface type.
func (w *walker) Interface(v reflect.Value) error {
	if !v.IsValid() {
		return nil
	}
	// Lazily allocated: most walks never encounter an interface value.
	if w.ifaceTypes == nil {
		w.ifaceTypes = make(map[uint64]reflect.Type)
	}

	w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
	return nil
}
|
||||||
|
|
||||||
|
// Primitive copies a leaf value (number, string, bool, chan, func, …) by
// allocating a new value of the same type, setting it, and pushing the
// pointer onto the value stack.
func (w *walker) Primitive(v reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(v)

	// IsValid verifies the v is non-zero and CanInterface verifies
	// that we're allowed to read this value (unexported fields).
	// Otherwise newV stays the zero reflect.Value, which downstream
	// code treats as "nothing to set".
	var newV reflect.Value
	if v.IsValid() && v.CanInterface() {
		newV = reflect.New(v.Type())
		newV.Elem().Set(v)
	}

	w.valPush(newV)
	w.replacePointerMaybe()
	return nil
}
|
||||||
|
|
||||||
|
// Slice is called when the walk encounters a slice. It allocates a new
// slice of the same type/length/cap (or a nil slice if the source is nil)
// and pushes it onto the container and value stacks; elements are filled
// in by Exit(SliceElem).
func (w *walker) Slice(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var newS reflect.Value
	if s.IsNil() {
		newS = reflect.New(s.Type())
	} else {
		newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap()))
	}

	w.cs = append(w.cs, newS)
	w.valPush(newS)
	return nil
}
|
||||||
|
|
||||||
|
// SliceElem records the element index; the element itself is copied when
// the walk unwinds in Exit(SliceElem).
func (w *walker) SliceElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the slice here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}
|
||||||
|
|
||||||
|
// Array is called when the walk encounters an array. A new zero array of
// the same type is allocated and pushed; elements are set in
// Exit(ArrayElem).
func (w *walker) Array(a reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(a)

	newA := reflect.New(a.Type())

	w.cs = append(w.cs, newA)
	w.valPush(newA)
	return nil
}
|
||||||
|
|
||||||
|
// ArrayElem records the element index; the element itself is copied when
// the walk unwinds in Exit(ArrayElem).
func (w *walker) ArrayElem(i int, elem reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// We don't write the array here because elem might still be
	// arbitrarily complex. Just record the index and continue on.
	w.valPush(reflect.ValueOf(i))

	return nil
}
|
||||||
|
|
||||||
|
// Struct is called when the walk encounters a struct. If a user-registered
// Copier exists for the type it is used directly (and deeper traversal is
// suppressed via ignoreDepth); otherwise a zero struct is allocated and
// reflectwalk fills its fields as the walk continues.
func (w *walker) Struct(s reflect.Value) error {
	if w.ignoring() {
		return nil
	}
	w.lock(s)

	var v reflect.Value
	if c, ok := Copiers[s.Type()]; ok {
		// We have a Copier for this struct, so we use that copier to
		// get the copy, and we ignore anything deeper than this.
		w.ignoreDepth = w.depth

		dup, err := c(s.Interface())
		if err != nil {
			return err
		}

		// We need to put a pointer to the value on the value stack,
		// so allocate a new pointer and set it.
		v = reflect.New(s.Type())
		reflect.Indirect(v).Set(reflect.ValueOf(dup))
	} else {
		// No copier, we copy ourselves and allow reflectwalk to guide
		// us deeper into the structure for copying.
		v = reflect.New(s.Type())
	}

	// Push the value onto the value stack for setting the struct field,
	// and add the struct itself to the containers stack in case we walk
	// deeper so that its own fields can be modified.
	w.valPush(v)
	w.cs = append(w.cs, v)

	return nil
}
|
||||||
|
|
||||||
|
// StructField is called for each field of a struct. Unexported fields are
// skipped entirely; exported fields have their StructField descriptor
// pushed so Exit(StructField) can assign the copied value by name.
func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
	if w.ignoring() {
		return nil
	}

	// If PkgPath is non-empty, this is a private (unexported) field.
	// We do not set this unexported since the Go runtime doesn't allow us.
	if f.PkgPath != "" {
		return reflectwalk.SkipEntry
	}

	// Push the field onto the stack, we'll handle it when we exit
	// the struct field in Exit...
	w.valPush(reflect.ValueOf(f))
	return nil
}
|
||||||
|
|
||||||
|
// ignore causes the walker to ignore any more values until we exit this on.
// It works by marking the current depth; ignoring() reports true until the
// walk unwinds back above it.
func (w *walker) ignore() {
	w.ignoreDepth = w.depth
}
|
||||||
|
|
||||||
|
// ignoring reports whether the walker is currently inside a subtree that
// ignore()/a registered Copier asked to skip.
func (w *walker) ignoring() bool {
	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
}
|
||||||
|
|
||||||
|
// pointerPeek reports whether the value at the current depth was reached
// through at least one pointer indirection.
func (w *walker) pointerPeek() bool {
	return w.ps[w.depth] > 0
}
|
||||||
|
|
||||||
|
// valPop pops and returns the top of the value stack. Once the stack is
// fully drained, Result is reset so the next push becomes the new result.
func (w *walker) valPop() reflect.Value {
	result := w.vals[len(w.vals)-1]
	w.vals = w.vals[:len(w.vals)-1]

	// If we're out of values, that means we popped everything off. In
	// this case, we reset the result so the next pushed value becomes
	// the result.
	if len(w.vals) == 0 {
		w.Result = nil
	}

	return result
}
|
||||||
|
|
||||||
|
// valPush pushes v onto the value stack. The first valid value pushed onto
// an empty stack becomes the walker's Result (the outermost copy).
func (w *walker) valPush(v reflect.Value) {
	w.vals = append(w.vals, v)

	// If we haven't set the result yet, then this is the result since
	// it is the first (outermost) value we're seeing.
	if w.Result == nil && v.IsValid() {
		w.Result = v.Interface()
	}
}
|
||||||
|
|
||||||
|
// replacePointerMaybe adjusts the top of the value stack when leaving a
// container: values are stored internally as pointers, so if the original
// was not reached through a pointer the value is dereferenced; otherwise
// the recorded pointer/interface nesting is rebuilt around it.
func (w *walker) replacePointerMaybe() {
	// Determine the last pointer value. If it is NOT a pointer, then
	// we need to push that onto the stack.
	if !w.pointerPeek() {
		w.valPush(reflect.Indirect(w.valPop()))
		return
	}

	v := w.valPop()

	// If the expected type is a pointer to an interface of any depth,
	// such as *interface{}, **interface{}, etc., then we need to convert
	// the value "v" from *CONCRETE to *interface{} so types match for
	// Set.
	//
	// Example if v is type *Foo where Foo is a struct, v would become
	// *interface{} instead. This only happens if we have an interface expectation
	// at this depth.
	//
	// For more info, see GH-16
	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
		y := reflect.New(iType)           // Create *interface{}
		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
		v = y                             // v is now typed *interface{} (where *v = Foo)
	}

	// Re-wrap one pointer level per recorded indirection, re-boxing into
	// an interface wherever one was recorded at that pointer level.
	for i := 1; i < w.ps[w.depth]; i++ {
		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
			iface := reflect.New(iType).Elem()
			iface.Set(v)
			v = iface
		}

		p := reflect.New(v.Type())
		p.Elem().Set(v)
		v = p
	}

	w.valPush(v)
}
|
||||||
|
|
||||||
|
// if this value is a Locker, lock it and add it to the locks slice
|
||||||
|
func (w *walker) lock(v reflect.Value) {
|
||||||
|
if !w.useLocks {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !v.IsValid() || !v.CanInterface() {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
type rlocker interface {
|
||||||
|
RLocker() sync.Locker
|
||||||
|
}
|
||||||
|
|
||||||
|
var locker sync.Locker
|
||||||
|
|
||||||
|
// We can't call Interface() on a value directly, since that requires
|
||||||
|
// a copy. This is OK, since the pointer to a value which is a sync.Locker
|
||||||
|
// is also a sync.Locker.
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
switch l := v.Interface().(type) {
|
||||||
|
case rlocker:
|
||||||
|
// don't lock a mutex directly
|
||||||
|
if _, ok := l.(*sync.RWMutex); !ok {
|
||||||
|
locker = l.RLocker()
|
||||||
|
}
|
||||||
|
case sync.Locker:
|
||||||
|
locker = l
|
||||||
|
}
|
||||||
|
} else if v.CanAddr() {
|
||||||
|
switch l := v.Addr().Interface().(type) {
|
||||||
|
case rlocker:
|
||||||
|
// don't lock a mutex directly
|
||||||
|
if _, ok := l.(*sync.RWMutex); !ok {
|
||||||
|
locker = l.RLocker()
|
||||||
|
}
|
||||||
|
case sync.Locker:
|
||||||
|
locker = l
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// still no callable locker
|
||||||
|
if locker == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// don't lock a mutex directly
|
||||||
|
switch locker.(type) {
|
||||||
|
case *sync.Mutex, *sync.RWMutex:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
locker.Lock()
|
||||||
|
w.locks[w.depth] = locker
|
||||||
|
}
|
||||||
|
|
||||||
|
// wrapPtr is a helper that takes v and always make it *v. copystructure
|
||||||
|
// stores things internally as pointers until the last moment before unwrapping
|
||||||
|
func wrapPtr(v reflect.Value) reflect.Value {
|
||||||
|
if !v.IsValid() {
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
vPtr := reflect.New(v.Type())
|
||||||
|
vPtr.Elem().Set(v)
|
||||||
|
return vPtr
|
||||||
|
}
|
21
vendor/github.com/mitchellh/reflectwalk/LICENSE
generated
vendored
Normal file
21
vendor/github.com/mitchellh/reflectwalk/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
The MIT License (MIT)
|
||||||
|
|
||||||
|
Copyright (c) 2013 Mitchell Hashimoto
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
19
vendor/github.com/mitchellh/reflectwalk/location.go
generated
vendored
Normal file
19
vendor/github.com/mitchellh/reflectwalk/location.go
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
||||||
|
package reflectwalk
|
||||||
|
|
||||||
|
//go:generate stringer -type=Location location.go

// Location identifies where in a walked structure the walker currently is.
// It is passed to EnterExitWalker's Enter/Exit callbacks.
type Location uint

const (
	None Location = iota
	Map
	MapKey
	MapValue
	Slice
	SliceElem
	Array
	ArrayElem
	Struct
	StructField
	// WalkLoc brackets the entire walk: Enter(WalkLoc) is the first
	// callback and Exit(WalkLoc) the last.
	WalkLoc
)
|
16
vendor/github.com/mitchellh/reflectwalk/location_string.go
generated
vendored
Normal file
16
vendor/github.com/mitchellh/reflectwalk/location_string.go
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
||||||
|
// Code generated by "stringer -type=Location location.go"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package reflectwalk
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// NOTE(review): generated by stringer — regenerate rather than editing by hand.
const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc"

var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73}

// String returns the constant's name; out-of-range values render as
// "Location(n)".
func (i Location) String() string {
	if i >= Location(len(_Location_index)-1) {
		return fmt.Sprintf("Location(%d)", i)
	}
	return _Location_name[_Location_index[i]:_Location_index[i+1]]
}
|
401
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
generated
vendored
Normal file
401
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
generated
vendored
Normal file
|
@ -0,0 +1,401 @@
|
||||||
|
// reflectwalk is a package that allows you to "walk" complex structures
|
||||||
|
// similar to how you may "walk" a filesystem: visiting every element one
|
||||||
|
// by one and calling callback functions allowing you to handle and manipulate
|
||||||
|
// those elements.
|
||||||
|
package reflectwalk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"reflect"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PrimitiveWalker implementations are able to handle primitive values
// within complex structures. Primitive values are numbers, strings,
// booleans, funcs, chans.
//
// These primitive values are often members of more complex
// structures (slices, maps, etc.) that are walkable by other interfaces.
type PrimitiveWalker interface {
	Primitive(reflect.Value) error
}

// InterfaceWalker implementations are able to handle interface values as they
// are encountered during the walk.
type InterfaceWalker interface {
	Interface(reflect.Value) error
}

// MapWalker implementations are able to handle individual elements
// found within a map structure. Map is called once per map, MapElem
// once per key/value pair.
type MapWalker interface {
	Map(m reflect.Value) error
	MapElem(m, k, v reflect.Value) error
}

// SliceWalker implementations are able to handle slice elements found
// within complex structures.
type SliceWalker interface {
	Slice(reflect.Value) error
	SliceElem(int, reflect.Value) error
}

// ArrayWalker implementations are able to handle array elements found
// within complex structures.
type ArrayWalker interface {
	Array(reflect.Value) error
	ArrayElem(int, reflect.Value) error
}

// StructWalker is an interface that has methods that are called for
// structs when a Walk is done.
type StructWalker interface {
	Struct(reflect.Value) error
	StructField(reflect.StructField, reflect.Value) error
}

// EnterExitWalker implementations are notified before and after
// they walk deeper into complex structures (into struct fields,
// into slice elements, etc.)
type EnterExitWalker interface {
	Enter(Location) error
	Exit(Location) error
}

// PointerWalker implementations are notified when the value they're
// walking is a pointer or not. Pointer is called for _every_ value whether
// it is a pointer or not.
type PointerWalker interface {
	PointerEnter(bool) error
	PointerExit(bool) error
}

// SkipEntry can be returned from walk functions to skip walking
// the value of this field. This is only valid in the following functions:
//
//   - Struct: skips all fields from being walked
//   - StructField: skips walking the struct value
var SkipEntry = errors.New("skip this entry")
|
||||||
|
|
||||||
|
// Walk takes an arbitrary value and an interface and traverses the
|
||||||
|
// value, calling callbacks on the interface if they are supported.
|
||||||
|
// The interface should implement one or more of the walker interfaces
|
||||||
|
// in this package, such as PrimitiveWalker, StructWalker, etc.
|
||||||
|
func Walk(data, walker interface{}) (err error) {
|
||||||
|
v := reflect.ValueOf(data)
|
||||||
|
ew, ok := walker.(EnterExitWalker)
|
||||||
|
if ok {
|
||||||
|
err = ew.Enter(WalkLoc)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
err = walk(v, walker)
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok && err == nil {
|
||||||
|
err = ew.Exit(WalkLoc)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// walk dispatches a single value to the appropriate walkXxx helper after
// stripping any number of pointer/interface layers, notifying Pointer- and
// InterfaceWalker callbacks along the way.
func walk(v reflect.Value, w interface{}) (err error) {
	// Determine if we're receiving a pointer and if so notify the walker.
	// The logic here is convoluted but very important (tests will fail if
	// almost any part is changed). I will try to explain here.
	//
	// First, we check if the value is an interface, if so, we really need
	// to check the interface's VALUE to see whether it is a pointer.
	//
	// Check whether the value is then a pointer. If so, then set pointer
	// to true to notify the user.
	//
	// If we still have a pointer or an interface after the indirections, then
	// we unwrap another level
	//
	// At this time, we also set "v" to be the dereferenced value. This is
	// because once we've unwrapped the pointer we want to use that value.
	pointer := false
	pointerV := v

	for {
		if pointerV.Kind() == reflect.Interface {
			if iw, ok := w.(InterfaceWalker); ok {
				if err = iw.Interface(pointerV); err != nil {
					return
				}
			}

			pointerV = pointerV.Elem()
		}

		if pointerV.Kind() == reflect.Ptr {
			pointer = true
			v = reflect.Indirect(pointerV)
		}
		if pw, ok := w.(PointerWalker); ok {
			if err = pw.PointerEnter(pointer); err != nil {
				return
			}

			// Deliberately deferred inside the loop: one PointerExit fires
			// per PointerEnter, all when walk returns, in LIFO order.
			defer func(pointer bool) {
				if err != nil {
					return
				}

				err = pw.PointerExit(pointer)
			}(pointer)
		}

		if pointer {
			pointerV = v
		}
		pointer = false

		// If we still have a pointer or interface we have to indirect another level.
		switch pointerV.Kind() {
		case reflect.Ptr, reflect.Interface:
			continue
		}
		break
	}

	// We preserve the original value here because if it is an interface
	// type, we want to pass that directly into the walkPrimitive, so that
	// we can set it.
	originalV := v
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}

	// Collapse every numeric kind (Int..Complex128) onto Int so the switch
	// below treats all numbers as primitives.
	k := v.Kind()
	if k >= reflect.Int && k <= reflect.Complex128 {
		k = reflect.Int
	}

	switch k {
	// Primitives
	case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
		err = walkPrimitive(originalV, w)
		return
	case reflect.Map:
		err = walkMap(v, w)
		return
	case reflect.Slice:
		err = walkSlice(v, w)
		return
	case reflect.Struct:
		err = walkStruct(v, w)
		return
	case reflect.Array:
		err = walkArray(v, w)
		return
	default:
		panic("unsupported type: " + k.String())
	}
}
|
||||||
|
|
||||||
|
// walkMap traverses a map, invoking MapWalker and EnterExitWalker
// callbacks around the map and around each key and value.
func walkMap(v reflect.Value, w interface{}) error {
	ew, ewok := w.(EnterExitWalker)
	if ewok {
		ew.Enter(Map)
	}

	if mw, ok := w.(MapWalker); ok {
		if err := mw.Map(v); err != nil {
			return err
		}
	}

	for _, k := range v.MapKeys() {
		kv := v.MapIndex(k)

		if mw, ok := w.(MapWalker); ok {
			if err := mw.MapElem(v, k, kv); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(MapKey)
		}

		if err := walk(k, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(MapKey)
			ew.Enter(MapValue)
		}

		if err := walk(kv, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(MapValue)
		}
	}

	if ewok {
		ew.Exit(Map)
	}

	return nil
}
|
||||||
|
|
||||||
|
func walkPrimitive(v reflect.Value, w interface{}) error {
|
||||||
|
if pw, ok := w.(PrimitiveWalker); ok {
|
||||||
|
return pw.Primitive(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// walkSlice traverses a slice, invoking SliceWalker and EnterExitWalker
// callbacks around the slice and around each element.
func walkSlice(v reflect.Value, w interface{}) (err error) {
	ew, ok := w.(EnterExitWalker)
	if ok {
		ew.Enter(Slice)
	}

	if sw, ok := w.(SliceWalker); ok {
		if err := sw.Slice(v); err != nil {
			return err
		}
	}

	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)

		if sw, ok := w.(SliceWalker); ok {
			if err := sw.SliceElem(i, elem); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(SliceElem)
		}

		if err := walk(elem, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(SliceElem)
		}
	}

	ew, ok = w.(EnterExitWalker)
	if ok {
		ew.Exit(Slice)
	}

	return nil
}
|
||||||
|
|
||||||
|
// walkArray traverses an array, invoking ArrayWalker and EnterExitWalker
// callbacks around the array and around each element. It mirrors walkSlice.
func walkArray(v reflect.Value, w interface{}) (err error) {
	ew, ok := w.(EnterExitWalker)
	if ok {
		ew.Enter(Array)
	}

	if aw, ok := w.(ArrayWalker); ok {
		if err := aw.Array(v); err != nil {
			return err
		}
	}

	for i := 0; i < v.Len(); i++ {
		elem := v.Index(i)

		if aw, ok := w.(ArrayWalker); ok {
			if err := aw.ArrayElem(i, elem); err != nil {
				return err
			}
		}

		ew, ok := w.(EnterExitWalker)
		if ok {
			ew.Enter(ArrayElem)
		}

		if err := walk(elem, w); err != nil {
			return err
		}

		if ok {
			ew.Exit(ArrayElem)
		}
	}

	ew, ok = w.(EnterExitWalker)
	if ok {
		ew.Exit(Array)
	}

	return nil
}
|
||||||
|
|
||||||
|
// walkStruct traverses a struct's fields, invoking StructWalker and
// EnterExitWalker callbacks. SkipEntry from Struct skips all fields;
// SkipEntry from StructField skips just that field.
func walkStruct(v reflect.Value, w interface{}) (err error) {
	ew, ewok := w.(EnterExitWalker)
	if ewok {
		ew.Enter(Struct)
	}

	skip := false
	if sw, ok := w.(StructWalker); ok {
		err = sw.Struct(v)
		if err == SkipEntry {
			skip = true
			err = nil
		}
		if err != nil {
			return
		}
	}

	if !skip {
		vt := v.Type()
		for i := 0; i < vt.NumField(); i++ {
			sf := vt.Field(i)
			f := v.FieldByIndex([]int{i})

			if sw, ok := w.(StructWalker); ok {
				err = sw.StructField(sf, f)

				// SkipEntry just pretends this field doesn't even exist
				if err == SkipEntry {
					continue
				}

				if err != nil {
					return
				}
			}

			ew, ok := w.(EnterExitWalker)
			if ok {
				ew.Enter(StructField)
			}

			err = walk(f, w)
			if err != nil {
				return
			}

			if ok {
				ew.Exit(StructField)
			}
		}
	}

	if ewok {
		ew.Exit(Struct)
	}

	return nil
}
|
29
vendor/github.com/vulcand/oxy/forward/fwd.go
generated
vendored
29
vendor/github.com/vulcand/oxy/forward/fwd.go
generated
vendored
|
@ -9,6 +9,7 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"reflect"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -261,8 +262,32 @@ func (f *websocketForwarder) serveHTTP(w http.ResponseWriter, req *http.Request,
|
||||||
}
|
}
|
||||||
targetConn, resp, err := dialer.Dial(outReq.URL.String(), outReq.Header)
|
targetConn, resp, err := dialer.Dial(outReq.URL.String(), outReq.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
ctx.log.Errorf("Error dialing `%v`: %v", outReq.Host, err)
|
if resp == nil {
|
||||||
ctx.errHandler.ServeHTTP(w, req, err)
|
ctx.errHandler.ServeHTTP(w, req, err)
|
||||||
|
} else {
|
||||||
|
ctx.log.Errorf("Error dialing %q: %v with resp: %d %s", outReq.Host, err, resp.StatusCode, resp.Status)
|
||||||
|
hijacker, ok := w.(http.Hijacker)
|
||||||
|
if !ok {
|
||||||
|
ctx.log.Errorf("%s can not be hijack", reflect.TypeOf(w))
|
||||||
|
ctx.errHandler.ServeHTTP(w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
conn, _, err := hijacker.Hijack()
|
||||||
|
if err != nil {
|
||||||
|
ctx.log.Errorf("Failed to hijack responseWriter")
|
||||||
|
ctx.errHandler.ServeHTTP(w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
err = resp.Write(conn)
|
||||||
|
if err != nil {
|
||||||
|
ctx.log.Errorf("Failed to forward response")
|
||||||
|
ctx.errHandler.ServeHTTP(w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue