Add Rancher provider again
parent ed12366d52
commit e1d097ea20
31 changed files with 2585 additions and 295 deletions

16  Gopkg.lock (generated)
@ -1389,6 +1389,15 @@
   pruneopts = "NUT"
   revision = "a1dba9ce8baed984a2495b658c82687f8157b98f"
 
+[[projects]]
+  branch = "containous-fork"
+  digest = "1:f103263e55945772fcb058736a03dfc2796c6a14b99b1684b8e88327c0fa0f75"
+  name = "github.com/rancher/go-rancher-metadata"
+  packages = ["metadata"]
+  pruneopts = "NUT"
+  revision = "c6a65f8b7a28edc424e85732df4cd1c215843a99"
+  source = "github.com/containous/go-rancher-metadata"
+
 [[projects]]
   digest = "1:0d095d4b1220902aec6896e23808fbcdfa5192dab96d5a31a443a8c47eabc326"
   name = "github.com/rcrowley/go-metrics"
@ -1426,12 +1435,12 @@
   version = "v1.2.0"
 
 [[projects]]
-  digest = "1:01252cd79aac70f16cac02a72a1067dd136e0ad6d5b597d0129cf74c739fd8d1"
+  digest = "1:bb9033d47c116ea3b981ff159bdef73df8351b0b9700da2066339b97211b1bf0"
   name = "github.com/sirupsen/logrus"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "a67f783a3814b8729bd2dac5780b5f78f8dbd64d"
-  version = "v1.1.0"
+  revision = "dae0fa8d5b0c810a8ab733fbd5510c7cae84eca4"
+  version = "v1.4.0"
 
 [[projects]]
   digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
@ -2228,6 +2237,7 @@
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",
     "github.com/prometheus/client_model/go",
+    "github.com/rancher/go-rancher-metadata/metadata",
     "github.com/ryanuber/go-glob",
     "github.com/satori/go.uuid",
     "github.com/sirupsen/logrus",

@ -149,10 +149,10 @@ required = [
   name = "github.com/opentracing/opentracing-go"
   version = "1.0.2"
 
-#[[constraint]]
-#  branch = "containous-fork"
-#  name = "github.com/rancher/go-rancher-metadata"
-#  source = "github.com/containous/go-rancher-metadata"
+[[constraint]]
+  branch = "containous-fork"
+  name = "github.com/rancher/go-rancher-metadata"
+  source = "github.com/containous/go-rancher-metadata"
 
 [[constraint]]
   branch = "master"

@ -11,6 +11,7 @@ import (
 	"github.com/containous/traefik/pkg/provider/file"
 	"github.com/containous/traefik/pkg/provider/kubernetes/ingress"
 	"github.com/containous/traefik/pkg/provider/marathon"
+	"github.com/containous/traefik/pkg/provider/rancher"
 	"github.com/containous/traefik/pkg/provider/rest"
 	"github.com/containous/traefik/pkg/tracing/datadog"
 	"github.com/containous/traefik/pkg/tracing/instana"
@ -172,12 +173,22 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
 	// default Kubernetes
 	var defaultKubernetes ingress.Provider
 
+	// default Rancher
+	var defaultRancher rancher.Provider
+	defaultRancher.Watch = true
+	defaultRancher.ExposedByDefault = true
+	defaultRancher.EnableServiceHealthFilter = true
+	defaultRancher.RefreshSeconds = 15
+	defaultRancher.DefaultRule = rancher.DefaultTemplateRule
+	defaultRancher.Prefix = "latest"
+
 	defaultProviders := static.Providers{
 		File:       &defaultFile,
 		Docker:     &defaultDocker,
 		Rest:       &defaultRest,
 		Marathon:   &defaultMarathon,
 		Kubernetes: &defaultKubernetes,
+		Rancher:    &defaultRancher,
 	}
 
 	return &TraefikConfiguration{

BIN  docs/content/assets/img/providers/rancher.png (new file, 2.2 KiB, binary not shown)
177  docs/content/providers/rancher.md (new file)
@ -0,0 +1,177 @@
# Traefik & Rancher

A Story of Labels, Services & Containers
{: .subtitle }

![Rancher](../assets/img/providers/rancher.png)

Attach labels to your services and let Traefik do the rest!

## Configuration Examples

??? example "Configuring Rancher & Deploying / Exposing Services"

    Enabling the Rancher provider:

    ```toml
    [provider.rancher]
    ```

    Attaching labels to services:

    ```yaml
    labels:
      - traefik.http.routers.my-service.rule=Host(`my-domain`)
    ```

## Provider Configuration Options

!!! tip "Browse the Reference"

    If you're in a hurry, maybe you'd rather go through the configuration reference:

    ```toml
    ################################################################
    # Rancher Provider
    ################################################################

    # Enable the Rancher provider.
    [rancher]

    # The default host rule for all services.
    #
    # Optional
    #
    DefaultRule = "Host(`{{ normalize .Name }}`)"

    # Expose Rancher services by default in Traefik.
    #
    # Optional
    #
    ExposedByDefault = true

    # Enable watching for Rancher changes.
    #
    # Optional
    #
    Watch = true

    # Filter out services with unhealthy or inactive states.
    #
    # Optional
    #
    EnableServiceHealthFilter = true

    # Defines the polling interval (in seconds).
    #
    # Optional
    #
    RefreshSeconds = 15

    # Poll the Rancher metadata service for changes every `rancher.refreshSeconds`,
    # which is less accurate than the default long polling technique.
    #
    # Optional
    #
    IntervalPoll = false

    # Prefix used for accessing the Rancher metadata service.
    #
    # Optional
    #
    Prefix = "latest"
    ```

### `ExposedByDefault`

_Optional, Default=true_

Expose Rancher services by default in Traefik.
If set to false, services that don't have a `traefik.enable=true` label will be ignored in the resulting routing configuration.

### `DefaultRule`

_Optional_

The default host rule for all services. It defaults to ``Host(`{{ normalize .Name }}`)``, built from the Rancher service name.

This option can be overridden on a container basis with the `traefik.http.routers.Router1.rule` label.
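For instance, the rule can be templated on the Rancher service name; the `traefik.wtf` domain below is purely illustrative (it is the one used in the provider's tests):

```toml
[rancher]
DefaultRule = "Host(`{{ normalize .Name }}.traefik.wtf`)"
```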
### `EnableServiceHealthFilter`

_Optional, Default=true_

Filter out services with unhealthy or inactive states.

### `RefreshSeconds`

_Optional, Default=15_

Defines the polling interval (in seconds).

### `IntervalPoll`

_Optional, Default=false_

Poll the Rancher metadata service for changes every `rancher.refreshSeconds`,
which is less accurate than the default long polling technique, which provides near-instantaneous updates to Traefik.
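For example, to trade long polling for a plain 30-second polling loop (the interval value is illustrative):

```toml
[rancher]
IntervalPoll = true
RefreshSeconds = 30
```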
### `Prefix`

_Optional, Default="latest"_

Prefix used for accessing the Rancher metadata service.

### General

Traefik creates, for each Rancher service, a corresponding [service](../routing/services/index.md) and [router](../routing/routers/index.md).

The Service automatically gets a server per container in this Rancher service, and the router gets a default rule attached to it, based on the service name.
### Routers

To update the configuration of the Router automatically attached to the container, add labels starting with `traefik.http.routers.{name-of-your-choice}.`, followed by the option you want to change.
For example, to change the rule, you could add the label `` traefik.http.routers.my-container.rule=Host(`my-domain`) ``.

Every [Router](../routing/routers/index.md) parameter can be updated this way.
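As a label block, the rule example above looks like this:

```yaml
labels:
  - traefik.http.routers.my-container.rule=Host(`my-domain`)
```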
### Services

To update the configuration of the Service automatically attached to the container, add labels starting with `traefik.http.services.{name-of-your-choice}.`,
followed by the option you want to change. For example, to change the load balancer method,
you'd add the label `traefik.http.services.{name-of-your-choice}.loadbalancer.method=drr`.

Every [Service](../routing/services/index.md) parameter can be updated this way.
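As a label block, the load balancer example above looks like this (with `my-service` as a placeholder name):

```yaml
labels:
  - traefik.http.services.my-service.loadbalancer.method=drr
```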
### Middleware

You can declare pieces of middleware using labels starting with `traefik.http.middlewares.{name-of-your-choice}.`, followed by the middleware type/options.
For example, to declare a middleware [`schemeredirect`](../middlewares/redirectscheme.md) named `my-redirect`, you'd write `traefik.http.middlewares.my-redirect.schemeredirect.scheme: https`.

??? example "Declaring and Referencing a Middleware"

    ```yaml
    # ...
    labels:
      - traefik.http.middlewares.my-redirect.schemeredirect.scheme=https
      - traefik.http.routers.my-container.middlewares=my-redirect
    ```

!!! warning "Conflicts in Declaration"

    If you declare multiple middleware with the same name but with different parameters, the middleware fails to be declared.

### Specific Options

#### `traefik.enable`

You can tell Traefik to consider (or not) the container by setting `traefik.enable` to true or false.

This option overrides the value of `exposedByDefault`.
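For instance, to expose a service even though `ExposedByDefault` is set to false:

```yaml
labels:
  - traefik.enable=true
```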
#### `traefik.tags`

Sets the tags for [constraints filtering](./overview.md#constraints-configuration).
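For example (the `foo` tag mirrors the one used in the provider's tests):

```yaml
labels:
  - traefik.tags=foo
```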
#### Port Lookup

Traefik is now capable of detecting the port to use by following the default Rancher flow.
That means that if you just expose, let's say, port `:1337` on the Rancher UI, Traefik will pick up this port and use it.
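If the detected port is not the one you want, it can be set explicitly through the service port label; the service name and port below are illustrative:

```yaml
labels:
  - traefik.http.services.my-service.loadbalancer.server.port=8080
```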
@ -74,9 +74,10 @@ nav:
   - 'Configuration Discovery':
       - 'Overview': 'providers/overview.md'
       - 'Docker': 'providers/docker.md'
-      - 'File': 'providers/file.md'
       - 'Kubernetes IngressRoute': 'providers/kubernetes-crd.md'
 #      - 'Kubernetes Ingress': 'providers/kubernetes-ingress.md'
+      - 'Rancher': 'providers/rancher.md'
+      - 'File': 'providers/file.md'
   - 'Routing & Load Balancing':
       - 'Overview': 'routing/overview.md'
       - 'Entrypoints': 'routing/entrypoints.md'

@ -14,6 +14,7 @@ import (
 	"github.com/containous/traefik/pkg/provider/kubernetes/crd"
 	"github.com/containous/traefik/pkg/provider/kubernetes/ingress"
 	"github.com/containous/traefik/pkg/provider/marathon"
+	"github.com/containous/traefik/pkg/provider/rancher"
 	"github.com/containous/traefik/pkg/provider/rest"
 	"github.com/containous/traefik/pkg/tls"
 	"github.com/containous/traefik/pkg/tracing/datadog"
@ -126,6 +127,7 @@ type Providers struct {
 	Kubernetes    *ingress.Provider `description:"Enable Kubernetes backend with default settings" export:"true"`
 	KubernetesCRD *crd.Provider     `description:"Enable Kubernetes backend with default settings" export:"true"`
 	Rest          *rest.Provider    `description:"Enable Rest backend with default settings" export:"true"`
+	Rancher       *rancher.Provider `description:"Enable Rancher backend with default settings" export:"true"`
 }
 
 // SetEffectiveConfiguration adds missing configuration parameters derived from existing ones.
@ -178,6 +180,12 @@ func (c *Configuration) SetEffectiveConfiguration(configFile string) {
 		c.Providers.File.TraefikFile = configFile
 	}
 
+	if c.Providers.Rancher != nil {
+		if c.Providers.Rancher.RefreshSeconds <= 0 {
+			c.Providers.Rancher.RefreshSeconds = 15
+		}
+	}
+
 	c.initACMEProvider()
 	c.initTracing()
 }

@ -44,6 +44,9 @@ func NewProviderAggregator(conf static.Providers) ProviderAggregator {
 	if conf.KubernetesCRD != nil {
 		p.quietAddProvider(conf.KubernetesCRD)
 	}
+	if conf.Rancher != nil {
+		p.quietAddProvider(conf.Rancher)
+	}
 
 	return p
 }

237  pkg/provider/rancher/config.go (new file)
@ -0,0 +1,237 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/containous/traefik/pkg/config"
|
||||
"github.com/containous/traefik/pkg/log"
|
||||
"github.com/containous/traefik/pkg/provider"
|
||||
"github.com/containous/traefik/pkg/provider/label"
|
||||
)
|
||||
|
||||
func (p *Provider) buildConfiguration(ctx context.Context, services []rancherData) *config.Configuration {
|
||||
configurations := make(map[string]*config.Configuration)
|
||||
|
||||
for _, service := range services {
|
||||
ctxService := log.With(ctx, log.Str("service", service.Name))
|
||||
|
||||
if !p.keepService(ctx, service) {
|
||||
continue
|
||||
}
|
||||
|
||||
logger := log.FromContext(ctxService)
|
||||
|
||||
confFromLabel, err := label.DecodeConfiguration(service.Labels)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(confFromLabel.TCP.Routers) > 0 || len(confFromLabel.TCP.Services) > 0 {
|
||||
err := p.buildTCPServiceConfiguration(ctxService, service, confFromLabel.TCP)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
provider.BuildTCPRouterConfiguration(ctxService, confFromLabel.TCP)
|
||||
if len(confFromLabel.HTTP.Routers) == 0 &&
|
||||
len(confFromLabel.HTTP.Middlewares) == 0 &&
|
||||
len(confFromLabel.HTTP.Services) == 0 {
|
||||
configurations[service.Name] = confFromLabel
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
err = p.buildServiceConfiguration(ctx, service, confFromLabel.HTTP)
|
||||
if err != nil {
|
||||
logger.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
model := struct {
|
||||
Name string
|
||||
Labels map[string]string
|
||||
}{
|
||||
Name: service.Name,
|
||||
Labels: service.Labels,
|
||||
}
|
||||
|
||||
provider.BuildRouterConfiguration(ctx, confFromLabel.HTTP, service.Name, p.defaultRuleTpl, model)
|
||||
|
||||
configurations[service.Name] = confFromLabel
|
||||
}
|
||||
|
||||
return provider.Merge(ctx, configurations)
|
||||
}
|
||||
|
||||
func (p *Provider) buildTCPServiceConfiguration(ctx context.Context, service rancherData, configuration *config.TCPConfiguration) error {
|
||||
serviceName := service.Name
|
||||
|
||||
if len(configuration.Services) == 0 {
|
||||
configuration.Services = make(map[string]*config.TCPService)
|
||||
lb := &config.TCPLoadBalancerService{}
|
||||
lb.SetDefaults()
|
||||
configuration.Services[serviceName] = &config.TCPService{
|
||||
LoadBalancer: lb,
|
||||
}
|
||||
}
|
||||
|
||||
for _, confService := range configuration.Services {
|
||||
err := p.addServerTCP(ctx, service, confService.LoadBalancer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) buildServiceConfiguration(ctx context.Context, service rancherData, configuration *config.HTTPConfiguration) error {
|
||||
|
||||
serviceName := service.Name
|
||||
|
||||
if len(configuration.Services) == 0 {
|
||||
configuration.Services = make(map[string]*config.Service)
|
||||
lb := &config.LoadBalancerService{}
|
||||
lb.SetDefaults()
|
||||
configuration.Services[serviceName] = &config.Service{
|
||||
LoadBalancer: lb,
|
||||
}
|
||||
}
|
||||
|
||||
for _, confService := range configuration.Services {
|
||||
err := p.addServers(ctx, service, confService.LoadBalancer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) keepService(ctx context.Context, service rancherData) bool {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
if !service.ExtraConf.Enable {
|
||||
logger.Debug("Filtering disabled service.")
|
||||
return false
|
||||
}
|
||||
|
||||
if ok, failingConstraint := p.MatchConstraints(service.ExtraConf.Tags); !ok {
|
||||
if failingConstraint != nil {
|
||||
logger.Debugf("service pruned by %q constraint", failingConstraint.String())
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if p.EnableServiceHealthFilter {
|
||||
if service.Health != "" && service.Health != healthy && service.Health != updatingHealthy {
|
||||
logger.Debugf("Filtering service %s with healthState of %s \n", service.Name, service.Health)
|
||||
return false
|
||||
}
|
||||
if service.State != "" && service.State != active && service.State != updatingActive && service.State != upgraded && service.State != upgrading {
|
||||
logger.Debugf("Filtering service %s with state of %s \n", service.Name, service.State)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Provider) addServerTCP(ctx context.Context, service rancherData, loadBalancer *config.TCPLoadBalancerService) error {
|
||||
log.FromContext(ctx).Debugf("Trying to add servers for service %s \n", service.Name)
|
||||
|
||||
serverPort := ""
|
||||
|
||||
if loadBalancer != nil && len(loadBalancer.Servers) > 0 {
|
||||
serverPort = loadBalancer.Servers[0].Port
|
||||
}
|
||||
|
||||
port := getServicePort(service)
|
||||
|
||||
if len(loadBalancer.Servers) == 0 {
|
||||
server := config.TCPServer{}
|
||||
server.SetDefaults()
|
||||
|
||||
loadBalancer.Servers = []config.TCPServer{server}
|
||||
}
|
||||
|
||||
if serverPort != "" {
|
||||
port = serverPort
|
||||
loadBalancer.Servers[0].Port = ""
|
||||
}
|
||||
|
||||
if port == "" {
|
||||
return errors.New("port is missing")
|
||||
}
|
||||
|
||||
var servers []config.TCPServer
|
||||
for _, containerIP := range service.Containers {
|
||||
servers = append(servers, config.TCPServer{
|
||||
Address: net.JoinHostPort(containerIP, port),
|
||||
Weight: 1,
|
||||
})
|
||||
}
|
||||
|
||||
loadBalancer.Servers = servers
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (p *Provider) addServers(ctx context.Context, service rancherData, loadBalancer *config.LoadBalancerService) error {
|
||||
log.FromContext(ctx).Debugf("Trying to add servers for service %s \n", service.Name)
|
||||
|
||||
serverPort := getLBServerPort(loadBalancer)
|
||||
port := getServicePort(service)
|
||||
|
||||
if len(loadBalancer.Servers) == 0 {
|
||||
server := config.Server{}
|
||||
server.SetDefaults()
|
||||
|
||||
loadBalancer.Servers = []config.Server{server}
|
||||
}
|
||||
|
||||
if serverPort != "" {
|
||||
port = serverPort
|
||||
loadBalancer.Servers[0].Port = ""
|
||||
}
|
||||
|
||||
if port == "" {
|
||||
return errors.New("port is missing")
|
||||
}
|
||||
|
||||
var servers []config.Server
|
||||
for _, containerIP := range service.Containers {
|
||||
servers = append(servers, config.Server{
|
||||
URL: fmt.Sprintf("%s://%s", loadBalancer.Servers[0].Scheme, net.JoinHostPort(containerIP, port)),
|
||||
Weight: 1,
|
||||
})
|
||||
}
|
||||
|
||||
loadBalancer.Servers = servers
|
||||
return nil
|
||||
}
|
||||
|
||||
func getLBServerPort(loadBalancer *config.LoadBalancerService) string {
|
||||
if loadBalancer != nil && len(loadBalancer.Servers) > 0 {
|
||||
return loadBalancer.Servers[0].Port
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getServicePort(data rancherData) string {
|
||||
rawPort := strings.Split(data.Port, "/")[0]
|
||||
hostPort := strings.Split(rawPort, ":")
|
||||
|
||||
if len(hostPort) >= 2 {
|
||||
return hostPort[1]
|
||||
}
|
||||
if len(hostPort) > 0 && hostPort[0] != "" {
|
||||
return hostPort[0]
|
||||
}
|
||||
return rawPort
|
||||
}
|
788  pkg/provider/rancher/config_test.go (new file)
|
@ -0,0 +1,788 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/containous/traefik/pkg/config"
|
||||
"github.com/containous/traefik/pkg/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_buildConfiguration(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
containers []rancherData
|
||||
constraints types.Constraints
|
||||
expected *config.Configuration
|
||||
}{
|
||||
{
|
||||
desc: "one service no label",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test": {
|
||||
Service: "Test",
|
||||
Rule: "Host(`Test.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "two services no label",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test1",
|
||||
Labels: map[string]string{},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
{
|
||||
Name: "Test2",
|
||||
Labels: map[string]string{},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.2"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test1": {
|
||||
Service: "Test1",
|
||||
Rule: "Host(`Test1.traefik.wtf`)",
|
||||
},
|
||||
"Test2": {
|
||||
Service: "Test2",
|
||||
Rule: "Host(`Test2.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test1": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
"Test2": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.2:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "two services no label multiple containers",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test1",
|
||||
Labels: map[string]string{},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1", "127.0.0.2"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
{
|
||||
Name: "Test2",
|
||||
Labels: map[string]string{},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"128.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test1": {
|
||||
Service: "Test1",
|
||||
Rule: "Host(`Test1.traefik.wtf`)",
|
||||
},
|
||||
"Test2": {
|
||||
Service: "Test2",
|
||||
Rule: "Host(`Test2.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test1": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
{
|
||||
URL: "http://127.0.0.2:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
"Test2": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://128.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service some labels",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.http.services.Service1.loadbalancer.method": "wrr",
|
||||
"traefik.http.routers.Router1.rule": "Host(`foo.com`)",
|
||||
"traefik.http.routers.Router1.service": "Service1",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Router1": {
|
||||
Service: "Service1",
|
||||
Rule: "Host(`foo.com`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Service1": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service which is unhealthy",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "broken",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service which is upgrading",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "upgradefailed",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service with rule label and has a host exposed port",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.http.routers.Router1.rule": "Host(`foo.com`)",
|
||||
},
|
||||
Port: "12345:80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Router1": {
|
||||
Service: "Test",
|
||||
Rule: "Host(`foo.com`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service with non matching constraints",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.http.routers.Router1.rule": "Host(`foo.com`)",
|
||||
},
|
||||
Port: "12345:80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
constraints: types.Constraints{
|
||||
&types.Constraint{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "bar",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "one service with matching constraints",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tags": "foo",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
constraints: types.Constraints{
|
||||
&types.Constraint{
|
||||
Key: "tag",
|
||||
MustMatch: true,
|
||||
Regex: "foo",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test": {
|
||||
Service: "Test",
|
||||
Rule: "Host(`Test.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Middlewares used in router",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.http.middlewares.Middleware1.basicauth.users": "test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/,test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
"traefik.http.routers.Test.middlewares": "Middleware1",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test": {
|
||||
Service: "Test",
|
||||
Rule: "Host(`Test.traefik.wtf`)",
|
||||
Middlewares: []string{"Middleware1"},
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{
|
||||
"Middleware1": {
|
||||
BasicAuth: &config.BasicAuth{
|
||||
Users: []string{
|
||||
"test:$apr1$H6uskkkW$IgXLP6ewTrSuBkTrqE8wj/",
|
||||
"test2:$apr1$d9hr9HBB$4HxwgUir3HP4EsggP/QNo0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Services: map[string]*config.Service{
|
||||
"Test": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "Port in labels",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.http.services.Test.loadbalancer.server.port": "80",
|
||||
},
|
||||
Port: "",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test": {
|
||||
Service: "Test",
|
||||
Rule: "Host(`Test.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Test": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tcp with label",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tcp.routers.foo.rule": "HostSNI(`foo.bar`)",
|
||||
"traefik.tcp.routers.foo.tls": "true",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{
|
||||
"foo": {
|
||||
Service: "Test",
|
||||
Rule: "HostSNI(`foo.bar`)",
|
||||
TLS: &config.RouterTCPTLSConfig{},
|
||||
},
|
||||
},
|
||||
Services: map[string]*config.TCPService{
|
||||
"Test": {
|
||||
LoadBalancer: &config.TCPLoadBalancerService{
|
||||
Servers: []config.TCPServer{
|
||||
{
|
||||
Address: "127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tcp with label without rule",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tcp.routers.foo.tls": "true",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{
|
||||
"Test": {
|
||||
LoadBalancer: &config.TCPLoadBalancerService{
|
||||
Servers: []config.TCPServer{
|
||||
{
|
||||
Address: "127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tcp with label and port",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tcp.routers.foo.rule": "HostSNI(`foo.bar`)",
|
||||
"traefik.tcp.routers.foo.tls": "true",
|
||||
"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{
|
||||
"foo": {
|
||||
Service: "foo",
|
||||
Rule: "HostSNI(`foo.bar`)",
|
||||
TLS: &config.RouterTCPTLSConfig{},
|
||||
},
|
||||
},
|
||||
Services: map[string]*config.TCPService{
|
||||
"foo": {
|
||||
LoadBalancer: &config.TCPLoadBalancerService{
|
||||
Servers: []config.TCPServer{
|
||||
{
|
||||
Address: "127.0.0.1:8080",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tcp with label and port and http service",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tcp.routers.foo.rule": "HostSNI(`foo.bar`)",
|
||||
"traefik.tcp.routers.foo.tls": "true",
|
||||
"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
|
||||
"traefik.http.services.Service1.loadbalancer.method": "drr",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1", "127.0.0.2"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{
|
||||
"foo": {
|
||||
Service: "foo",
|
||||
Rule: "HostSNI(`foo.bar`)",
|
||||
TLS: &config.RouterTCPTLSConfig{},
|
||||
},
|
||||
},
|
||||
Services: map[string]*config.TCPService{
|
||||
"foo": {
|
||||
LoadBalancer: &config.TCPLoadBalancerService{
|
||||
Servers: []config.TCPServer{
|
||||
{
|
||||
Address: "127.0.0.1:8080",
|
||||
Weight: 1,
|
||||
},
|
||||
{
|
||||
Address: "127.0.0.2:8080",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{
|
||||
"Test": {
|
||||
Service: "Service1",
|
||||
Rule: "Host(`Test.traefik.wtf`)",
|
||||
},
|
||||
},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{
|
||||
"Service1": {
|
||||
LoadBalancer: &config.LoadBalancerService{
|
||||
Servers: []config.Server{
|
||||
{
|
||||
URL: "http://127.0.0.1:80",
|
||||
Weight: 1,
|
||||
},
|
||||
{
|
||||
URL: "http://127.0.0.2:80",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "drr",
|
||||
PassHostHeader: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
desc: "tcp with label for tcp service",
|
||||
containers: []rancherData{
|
||||
{
|
||||
Name: "Test",
|
||||
Labels: map[string]string{
|
||||
"traefik.tcp.services.foo.loadbalancer.server.port": "8080",
|
||||
},
|
||||
Port: "80/tcp",
|
||||
Containers: []string{"127.0.0.1"},
|
||||
Health: "",
|
||||
State: "",
|
||||
},
|
||||
},
|
||||
expected: &config.Configuration{
|
||||
TCP: &config.TCPConfiguration{
|
||||
Routers: map[string]*config.TCPRouter{},
|
||||
Services: map[string]*config.TCPService{
|
||||
"foo": {
|
||||
LoadBalancer: &config.TCPLoadBalancerService{
|
||||
Servers: []config.TCPServer{
|
||||
{
|
||||
Address: "127.0.0.1:8080",
|
||||
Weight: 1,
|
||||
},
|
||||
},
|
||||
Method: "wrr",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
HTTP: &config.HTTPConfiguration{
|
||||
Routers: map[string]*config.Router{},
|
||||
Middlewares: map[string]*config.Middleware{},
|
||||
Services: map[string]*config.Service{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range testCases {
|
||||
test := test
|
||||
|
||||
t.Run(test.desc, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := Provider{
|
||||
ExposedByDefault: true,
|
||||
DefaultRule: "Host(`{{ normalize .Name }}.traefik.wtf`)",
|
||||
EnableServiceHealthFilter: true,
|
||||
}
|
||||
|
||||
p.Constraints = test.constraints
|
||||
|
||||
err := p.Init()
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < len(test.containers); i++ {
|
||||
var err error
|
||||
test.containers[i].ExtraConf, err = p.getConfiguration(test.containers[i])
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
configuration := p.buildConfiguration(context.Background(), test.containers)
|
||||
|
||||
assert.Equal(t, test.expected, configuration)
|
||||
})
|
||||
}
|
||||
}
|
23  pkg/provider/rancher/label.go (new file)
@ -0,0 +1,23 @@
package rancher

import (
	"github.com/containous/traefik/pkg/provider/label"
)

type configuration struct {
	Enable bool
	Tags   []string
}

func (p *Provider) getConfiguration(service rancherData) (configuration, error) {
	conf := configuration{
		Enable: p.ExposedByDefault,
	}

	err := label.Decode(service.Labels, &conf, "traefik.rancher.", "traefik.enable", "traefik.tags")
	if err != nil {
		return configuration{}, err
	}

	return conf, nil
}
|
221  pkg/provider/rancher/rancher.go (new file)
@ -0,0 +1,221 @@
|
|||
package rancher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/containous/traefik/pkg/config"
|
||||
"github.com/containous/traefik/pkg/job"
|
||||
"github.com/containous/traefik/pkg/log"
|
||||
"github.com/containous/traefik/pkg/provider"
|
||||
"github.com/containous/traefik/pkg/safe"
|
||||
rancher "github.com/rancher/go-rancher-metadata/metadata"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultTemplateRule The default template for the default rule.
|
||||
DefaultTemplateRule = "Host(`{{ normalize .Name }}`)"
|
||||
)
|
||||
|
||||
// Health
|
||||
const (
|
||||
healthy = "healthy"
|
||||
updatingHealthy = "updating-healthy"
|
||||
)
|
||||
|
||||
// State
|
||||
const (
|
||||
active = "active"
|
||||
running = "running"
|
||||
upgraded = "upgraded"
|
||||
upgrading = "upgrading"
|
||||
updatingActive = "updating-active"
|
||||
updatingRunning = "updating-running"
|
||||
)
|
||||
|
||||
var _ provider.Provider = (*Provider)(nil)
|
||||
|
||||
// Provider holds configurations of the provider.
|
||||
type Provider struct {
|
||||
provider.Constrainer `mapstructure:",squash" export:"true"`
|
||||
Watch bool `description:"Watch provider" export:"true"`
|
||||
DefaultRule string `description:"Default rule"`
|
||||
ExposedByDefault bool `description:"Expose containers by default" export:"true"`
|
||||
EnableServiceHealthFilter bool
|
||||
RefreshSeconds int
|
||||
defaultRuleTpl *template.Template
|
||||
IntervalPoll bool `description:"Poll the Rancher metadata service every 'rancher.refreshseconds' (less accurate)"`
|
||||
Prefix string `description:"Prefix used for accessing the Rancher metadata service"`
|
||||
}
|
||||
|
||||
type rancherData struct {
|
||||
Name string
|
||||
Labels map[string]string
|
||||
Containers []string
|
||||
Health string
|
||||
State string
|
||||
Port string
|
||||
ExtraConf configuration
|
||||
}
|
||||
|
||||
// Init the provider.
|
||||
func (p *Provider) Init() error {
|
||||
defaultRuleTpl, err := provider.MakeDefaultRuleTemplate(p.DefaultRule, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while parsing default rule: %v", err)
|
||||
}
|
||||
|
||||
p.defaultRuleTpl = defaultRuleTpl
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) createClient(ctx context.Context) (rancher.Client, error) {
|
||||
metadataServiceURL := fmt.Sprintf("http://rancher-metadata.rancher.internal/%s", p.Prefix)
|
||||
client, err := rancher.NewClientAndWait(metadataServiceURL)
|
||||
if err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to create Rancher metadata service client: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// Provide allows the rancher provider to provide configurations to traefik using the given configuration channel.
|
||||
func (p *Provider) Provide(configurationChan chan<- config.Message, pool *safe.Pool) error {
|
||||
pool.GoCtx(func(routineCtx context.Context) {
|
||||
ctxLog := log.With(routineCtx, log.Str(log.ProviderName, "rancher"))
|
||||
logger := log.FromContext(ctxLog)
|
||||
|
||||
operation := func() error {
|
||||
client, err := p.createClient(ctxLog)
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to create the metadata client metadata service: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
updateConfiguration := func(_ string) {
|
||||
stacks, err := client.GetStacks()
|
||||
if err != nil {
|
||||
logger.Errorf("Failed to query Rancher metadata service: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
rancherData := p.parseMetadataSourcedRancherData(ctxLog, stacks)
|
||||
|
||||
logger.Printf("Received Rancher data %+v", rancherData)
|
||||
|
||||
configuration := p.buildConfiguration(ctxLog, rancherData)
|
||||
configurationChan <- config.Message{
|
||||
ProviderName: "rancher",
|
||||
Configuration: configuration,
|
||||
}
|
||||
}
|
||||
updateConfiguration("init")
|
||||
|
||||
if p.Watch {
|
||||
if p.IntervalPoll {
|
||||
p.intervalPoll(ctxLog, client, updateConfiguration)
|
||||
} else {
|
||||
// Long polling should be favored for the most accurate configuration updates.
|
||||
// Holds the connection until there is either a change in the metadata repository or `p.RefreshSeconds` has elapsed.
|
||||
client.OnChangeCtx(ctxLog, p.RefreshSeconds, updateConfiguration)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
notify := func(err error, time time.Duration) {
|
||||
logger.Errorf("Provider connection error %+v, retrying in %s", err, time)
|
||||
}
|
||||
err := backoff.RetryNotify(safe.OperationWithRecover(operation), backoff.WithContext(job.NewBackOff(backoff.NewExponentialBackOff()), ctxLog), notify)
|
||||
if err != nil {
|
||||
logger.Errorf("Cannot connect to Provider server: %+v", err)
|
||||
}
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Provider) intervalPoll(ctx context.Context, client rancher.Client, updateConfiguration func(string)) {
|
||||
ticker := time.NewTicker(time.Second * time.Duration(p.RefreshSeconds))
|
||||
defer ticker.Stop()
|
||||
|
||||
var version string
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
newVersion, err := client.GetVersion()
|
||||
if err != nil {
|
||||
log.FromContext(ctx).Errorf("Failed to create Rancher metadata service client: %v", err)
|
||||
} else if version != newVersion {
|
||||
version = newVersion
|
||||
updateConfiguration(version)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Provider) parseMetadataSourcedRancherData(ctx context.Context, stacks []rancher.Stack) (rancherDataList []rancherData) {
|
||||
for _, stack := range stacks {
|
||||
for _, service := range stack.Services {
|
||||
ctxSvc := log.With(ctx, log.Str("stack", stack.Name), log.Str("service", service.Name))
|
||||
logger := log.FromContext(ctxSvc)
|
||||
|
||||
servicePort := ""
|
||||
if len(service.Ports) > 0 {
|
||||
servicePort = service.Ports[0]
|
||||
}
|
||||
for _, port := range service.Ports {
|
||||
logger.Debugf("Set Port %s", port)
|
||||
}
|
||||
|
||||
var containerIPAddresses []string
|
||||
for _, container := range service.Containers {
|
||||
if containerFilter(ctxSvc, container.Name, container.HealthState, container.State) {
|
||||
containerIPAddresses = append(containerIPAddresses, container.PrimaryIp)
|
||||
}
|
||||
}
|
||||
|
||||
service := rancherData{
|
||||
Name: service.Name + "/" + stack.Name,
|
||||
State: service.State,
|
||||
Labels: service.Labels,
|
||||
Port: servicePort,
|
||||
Containers: containerIPAddresses,
|
||||
}
|
||||
|
||||
extraConf, err := p.getConfiguration(service)
|
||||
if err != nil {
|
||||
logger.Errorf("Skip container %s: %v", service.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
service.ExtraConf = extraConf
|
||||
|
||||
rancherDataList = append(rancherDataList, service)
|
||||
}
|
||||
}
|
||||
return rancherDataList
|
||||
}
|
||||
|
||||
func containerFilter(ctx context.Context, name, healthState, state string) bool {
|
||||
logger := log.FromContext(ctx)
|
||||
|
||||
if healthState != "" && healthState != healthy && healthState != updatingHealthy {
|
||||
logger.Debugf("Filtering container %s with healthState of %s", name, healthState)
|
||||
return false
|
||||
}
|
||||
|
||||
if state != "" && state != running && state != updatingRunning && state != upgraded {
|
||||
logger.Debugf("Filtering container %s with state of %s", name, state)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
106  vendor/github.com/rancher/go-rancher-metadata/metadata/change.go (generated, vendored, new file)
|
@ -0,0 +1,106 @@
|
|||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type timeout interface {
|
||||
Timeout() bool
|
||||
}
|
||||
|
||||
func (m *client) OnChangeWithError(intervalSeconds int, do func(string)) error {
|
||||
return m.onChangeFromVersionWithError("init", intervalSeconds, do)
|
||||
}
|
||||
|
||||
func (m *client) OnChange(intervalSeconds int, do func(string)) {
|
||||
version := "init"
|
||||
updateVersionAndDo := func(v string) {
|
||||
version = v
|
||||
do(version)
|
||||
}
|
||||
interval := time.Duration(intervalSeconds)
|
||||
for {
|
||||
if err := m.onChangeFromVersionWithError(version, intervalSeconds, updateVersionAndDo); err != nil {
|
||||
logrus.Errorf("Error reading metadata version: %v", err)
|
||||
}
|
||||
time.Sleep(interval * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *client) onChangeFromVersionWithError(version string, intervalSeconds int, do func(string)) error {
|
||||
for {
|
||||
newVersion, err := m.waitVersion(intervalSeconds, version)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if version == newVersion {
|
||||
logrus.Debug("No changes in metadata version")
|
||||
} else {
|
||||
logrus.Debugf("Metadata Version has been changed. Old version: %s. New version: %s.", version, newVersion)
|
||||
version = newVersion
|
||||
do(newVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *client) waitVersion(maxWait int, version string) (string, error) {
|
||||
for {
|
||||
resp, err := m.SendRequest(fmt.Sprintf("/version?wait=true&value=%s&maxWait=%d", version, maxWait))
|
||||
if err != nil {
|
||||
t, ok := err.(timeout)
|
||||
if ok && t.Timeout() {
|
||||
continue
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(resp, &version)
|
||||
return version, err
|
||||
}
|
||||
}
|
||||
|
||||
func (m *client) OnChangeCtx(ctx context.Context, intervalSeconds int, do func(string)) {
|
||||
m.onChangeFromVersionWithErrorCtx(ctx, "init", intervalSeconds, do)
|
||||
}
|
||||
|
||||
func (m *client) onChangeFromVersionWithErrorCtx(ctx context.Context, version string, intervalSeconds int, do func(string)) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
newVersion, err := m.waitVersionCtx(ctx, intervalSeconds, version)
|
||||
if err != nil {
|
||||
t, ok := err.(timeout)
|
||||
if !ok || !t.Timeout() {
|
||||
logrus.Errorf("Error reading metadata version: %v", err)
|
||||
time.Sleep(time.Duration(intervalSeconds) * time.Second)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if version == newVersion {
|
||||
logrus.Debug("No changes in metadata version")
|
||||
} else {
|
||||
logrus.Debugf("Metadata Version has been changed. Old version: %s. New version: %s.", version, newVersion)
|
||||
version = newVersion
|
||||
do(newVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *client) waitVersionCtx(ctx context.Context, maxWait int, version string) (string, error) {
|
||||
resp, err := m.SendRequestCtx(ctx, fmt.Sprintf("/version?wait=true&value=%s&maxWait=%d", version, maxWait))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(resp, &version)
|
||||
return version, err
|
||||
}
|
312  vendor/github.com/rancher/go-rancher-metadata/metadata/metadata.go (generated, vendored, new file)
|
@ -0,0 +1,312 @@
|
|||
package metadata
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Client interface {
|
||||
OnChangeWithError(int, func(string)) error
|
||||
OnChange(int, func(string))
|
||||
OnChangeCtx(context.Context, int, func(string))
|
||||
SendRequest(string) ([]byte, error)
|
||||
GetVersion() (string, error)
|
||||
GetSelfHost() (Host, error)
|
||||
GetSelfContainer() (Container, error)
|
||||
GetSelfServiceByName(string) (Service, error)
|
||||
GetSelfService() (Service, error)
|
||||
GetSelfStack() (Stack, error)
|
||||
GetServices() ([]Service, error)
|
||||
GetStacks() ([]Stack, error)
|
||||
GetStackByName(string) (Stack, error)
|
||||
GetContainers() ([]Container, error)
|
||||
GetServiceContainers(string, string) ([]Container, error)
|
||||
GetHosts() ([]Host, error)
|
||||
GetHost(string) (Host, error)
|
||||
GetNetworks() ([]Network, error)
|
||||
}
|
||||
|
||||
type client struct {
|
||||
url string
|
||||
ip string
|
||||
    client *http.Client
}

func newClient(url, ip string) *client {
    return &client{url, ip, &http.Client{Timeout: 10 * time.Second}}
}

func NewClient(url string) Client {
    ip := ""
    return newClient(url, ip)
}

func NewClientWithIPAndWait(url, ip string) (Client, error) {
    client := newClient(url, ip)

    if err := testConnection(client); err != nil {
        return nil, err
    }

    return client, nil
}

func NewClientAndWait(url string) (Client, error) {
    ip := ""
    client := newClient(url, ip)

    if err := testConnection(client); err != nil {
        return nil, err
    }

    return client, nil
}

func (m *client) SendRequest(path string) ([]byte, error) {
    req, err := http.NewRequest("GET", m.url+path, nil)
    if err != nil {
        return nil, err
    }

    req.Header.Add("Accept", "application/json")
    if m.ip != "" {
        req.Header.Add("X-Forwarded-For", m.ip)
    }

    resp, err := m.client.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        return nil, fmt.Errorf("Error %v accessing %v path", resp.StatusCode, path)
    }

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    return body, nil
}

func (m *client) SendRequestCtx(ctx context.Context, path string) ([]byte, error) {
    req, err := http.NewRequest("GET", m.url+path, nil)
    if err != nil {
        return nil, err
    }

    req.Header.Add("Accept", "application/json")
    if m.ip != "" {
        req.Header.Add("X-Forwarded-For", m.ip)
    }

    resp, err := m.client.Do(req.WithContext(ctx))
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if resp.StatusCode != 200 {
        return nil, fmt.Errorf("error %v accessing %v path", resp.StatusCode, path)
    }

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return nil, err
    }
    return body, nil
}

func (m *client) GetVersion() (string, error) {
    resp, err := m.SendRequest("/version")
    if err != nil {
        return "", err
    }
    return string(resp[:]), nil
}

func (m *client) GetSelfHost() (Host, error) {
    resp, err := m.SendRequest("/self/host")
    var host Host
    if err != nil {
        return host, err
    }

    if err = json.Unmarshal(resp, &host); err != nil {
        return host, err
    }

    return host, nil
}

func (m *client) GetSelfContainer() (Container, error) {
    resp, err := m.SendRequest("/self/container")
    var container Container
    if err != nil {
        return container, err
    }

    if err = json.Unmarshal(resp, &container); err != nil {
        return container, err
    }

    return container, nil
}

func (m *client) GetSelfServiceByName(name string) (Service, error) {
    resp, err := m.SendRequest("/self/stack/services/" + name)
    var service Service
    if err != nil {
        return service, err
    }

    if err = json.Unmarshal(resp, &service); err != nil {
        return service, err
    }

    return service, nil
}

func (m *client) GetSelfService() (Service, error) {
    resp, err := m.SendRequest("/self/service")
    var service Service
    if err != nil {
        return service, err
    }

    if err = json.Unmarshal(resp, &service); err != nil {
        return service, err
    }

    return service, nil
}

func (m *client) GetSelfStack() (Stack, error) {
    resp, err := m.SendRequest("/self/stack")
    var stack Stack
    if err != nil {
        return stack, err
    }

    if err = json.Unmarshal(resp, &stack); err != nil {
        return stack, err
    }

    return stack, nil
}

func (m *client) GetServices() ([]Service, error) {
    resp, err := m.SendRequest("/services")
    var services []Service
    if err != nil {
        return services, err
    }

    if err = json.Unmarshal(resp, &services); err != nil {
        return services, err
    }
    return services, nil
}

func (m *client) GetStacks() ([]Stack, error) {
    resp, err := m.SendRequest("/stacks")
    var stacks []Stack
    if err != nil {
        return stacks, err
    }

    if err = json.Unmarshal(resp, &stacks); err != nil {
        return stacks, err
    }
    return stacks, nil
}

func (m *client) GetStackByName(name string) (Stack, error) {
    resp, err := m.SendRequest("/stacks/" + name)
    var stack Stack
    if err != nil {
        return stack, err
    }

    if err = json.Unmarshal(resp, &stack); err != nil {
        return stack, err
    }

    return stack, nil
}

func (m *client) GetContainers() ([]Container, error) {
    resp, err := m.SendRequest("/containers")
    var containers []Container
    if err != nil {
        return containers, err
    }

    if err = json.Unmarshal(resp, &containers); err != nil {
        return containers, err
    }
    return containers, nil
}

func (m *client) GetServiceContainers(serviceName string, stackName string) ([]Container, error) {
    var serviceContainers = []Container{}
    containers, err := m.GetContainers()
    if err != nil {
        return serviceContainers, err
    }

    for _, container := range containers {
        if container.StackName == stackName && container.ServiceName == serviceName {
            serviceContainers = append(serviceContainers, container)
        }
    }

    return serviceContainers, nil
}

func (m *client) GetHosts() ([]Host, error) {
    resp, err := m.SendRequest("/hosts")
    var hosts []Host
    if err != nil {
        return hosts, err
    }

    if err = json.Unmarshal(resp, &hosts); err != nil {
        return hosts, err
    }
    return hosts, nil
}

func (m *client) GetHost(UUID string) (Host, error) {
    var host Host
    hosts, err := m.GetHosts()
    if err != nil {
        return host, err
    }
    for _, host := range hosts {
        if host.UUID == UUID {
            return host, nil
        }
    }

    return host, fmt.Errorf("could not find host by UUID %v", UUID)
}

func (m *client) GetNetworks() ([]Network, error) {
    resp, err := m.SendRequest("/networks")
    var networks []Network
    if err != nil {
        return networks, err
    }

    if err = json.Unmarshal(resp, &networks); err != nil {
        return networks, err
    }

    return networks, nil
}
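Not part of the commit: a minimal sketch of how a consumer such as the new Rancher provider might drive the metadata client above. The endpoint URL ("http://rancher-metadata/latest") and the error handling are assumptions, not code from this repository.

package main

import (
    "fmt"
    "log"

    "github.com/rancher/go-rancher-metadata/metadata"
)

func main() {
    // NewClientAndWait retries the /version endpoint (see utils.go below) before returning.
    client, err := metadata.NewClientAndWait("http://rancher-metadata/latest") // assumed endpoint
    if err != nil {
        log.Fatalf("rancher metadata service unreachable: %v", err)
    }

    services, err := client.GetServices()
    if err != nil {
        log.Fatalf("listing services: %v", err)
    }

    for _, svc := range services {
        // Each container carries the addresses a provider would turn into backend servers.
        containers, err := client.GetServiceContainers(svc.Name, svc.StackName)
        if err != nil {
            log.Printf("skipping %s/%s: %v", svc.StackName, svc.Name, err)
            continue
        }
        fmt.Printf("%s/%s: %d containers\n", svc.StackName, svc.Name, len(containers))
    }
}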
156 vendor/github.com/rancher/go-rancher-metadata/metadata/types.go generated vendored Normal file
@@ -0,0 +1,156 @@
package metadata

type Stack struct {
    EnvironmentName string `json:"environment_name"`
    EnvironmentUUID string `json:"environment_uuid"`
    Name string `json:"name"`
    UUID string `json:"uuid"`
    Services []Service `json:"services"`
    System bool `json:"system"`
}

type HealthCheck struct {
    HealthyThreshold int `json:"healthy_threshold"`
    Interval int `json:"interval"`
    Port int `json:"port"`
    RequestLine string `json:"request_line"`
    ResponseTimeout int `json:"response_timeout"`
    UnhealthyThreshold int `json:"unhealthy_threshold"`
}

type Service struct {
    Scale int `json:"scale"`
    Name string `json:"name"`
    StackName string `json:"stack_name"`
    StackUUID string `json:"stack_uuid"`
    Kind string `json:"kind"`
    Hostname string `json:"hostname"`
    Vip string `json:"vip"`
    CreateIndex int `json:"create_index"`
    UUID string `json:"uuid"`
    ExternalIps []string `json:"external_ips"`
    Sidekicks []string `json:"sidekicks"`
    Containers []Container `json:"containers"`
    Ports []string `json:"ports"`
    Labels map[string]string `json:"labels"`
    Links map[string]string `json:"links"`
    Metadata map[string]interface{} `json:"metadata"`
    Token string `json:"token"`
    Fqdn string `json:"fqdn"`
    HealthCheck HealthCheck `json:"health_check"`
    PrimaryServiceName string `json:"primary_service_name"`
    LBConfig LBConfig `json:"lb_config"`
    EnvironmentUUID string `json:"environment_uuid"`
    State string `json:"state"`
    System bool `json:"system"`
    EnvironmentName string `json:"environment_name"`
    Selector string `json:"selector"`
}

type Container struct {
    Name string `json:"name"`
    PrimaryIp string `json:"primary_ip"`
    PrimaryMacAddress string `json:"primary_mac_address"`
    Ips []string `json:"ips"`
    Ports []string `json:"ports"`
    ServiceName string `json:"service_name"`
    ServiceIndex string `json:"service_index"`
    StackName string `json:"stack_name"`
    StackUUID string `json:"stack_uuid"`
    Labels map[string]string `json:"labels"`
    CreateIndex int `json:"create_index"`
    HostUUID string `json:"host_uuid"`
    UUID string `json:"uuid"`
    State string `json:"state"`
    HealthState string `json:"health_state"`
    ExternalId string `json:"external_id"`
    StartCount int `json:"start_count"`
    MemoryReservation int64 `json:"memory_reservation"`
    MilliCPUReservation int64 `json:"milli_cpu_reservation"`
    Dns []string `json:"dns"`
    DnsSearch []string `json:"dns_search"`
    HealthCheckHosts []string `json:"health_check_hosts"`
    NetworkFromContainerUUID string `json:"network_from_container_uuid"`
    NetworkUUID string `json:"network_uuid"`
    Links map[string]string `json:"links"`
    System bool `json:"system"`
    EnvironmentUUID string `json:"environment_uuid"`
    HealthCheck HealthCheck `json:"health_check"`
    EnvironmentName string `json:"environment_name"`
    ServiceUUID string `json:"service_uuid"`
}

type Network struct {
    Name string `json:"name"`
    UUID string `json:"uuid"`
    EnvironmentUUID string `json:"environment_uuid"`
    Metadata map[string]interface{} `json:"metadata"`
    HostPorts bool `json:"host_ports"`
    Default bool `json:"is_default"`
    Policy []NetworkPolicyRule `json:"policy,omitempty"`
    DefaultPolicyAction string `json:"default_policy_action"`
}

type Host struct {
    Name string `json:"name"`
    AgentIP string `json:"agent_ip"`
    HostId int `json:"host_id"`
    Labels map[string]string `json:"labels"`
    UUID string `json:"uuid"`
    Hostname string `json:"hostname"`
    Memory int64 `json:"memory"`
    MilliCPU int64 `json:"milli_cpu"`
    LocalStorageMb int64 `json:"local_storage_mb"`
    EnvironmentUUID string `json:"environment_uuid"`
    State string `json:"state"`
}

type PortRule struct {
    SourcePort int `json:"source_port"`
    Protocol string `json:"protocol"`
    Path string `json:"path"`
    Hostname string `json:"hostname"`
    Service string `json:"service"`
    TargetPort int `json:"target_port"`
    Priority int `json:"priority"`
    BackendName string `json:"backend_name"`
    Selector string `json:"selector"`
    Container string `json:"container"`
    ContainerUUID string `json:"container_uuid"`
}

type LBConfig struct {
    CertificateIDs []string `json:"certificate_ids"`
    DefaultCertificateID string `json:"default_certificate_id"`
    PortRules []PortRule `json:"port_rules"`
    Config string `json:"config"`
    StickinessPolicy LBStickinessPolicy `json:"stickiness_policy"`
}

type LBStickinessPolicy struct {
    Name string `json:"name"`
    Cookie string `json:"cookie"`
    Domain string `json:"domain"`
    Indirect bool `json:"indirect"`
    Nocache bool `json:"nocache"`
    Postonly bool `json:"postonly"`
    Mode string `json:"mode"`
}

type NetworkPolicyRuleBetween struct {
    Selector string `yaml:"selector,omitempty"`
    GroupBy string `yaml:"groupBy,omitempty"`
}

type NetworkPolicyRuleMember struct {
    Selector string `yaml:"selector,omitempty"`
}

type NetworkPolicyRule struct {
    From *NetworkPolicyRuleMember `yaml:"from"`
    To *NetworkPolicyRuleMember `yaml:"to"`
    Ports []string `yaml:"ports"`
    Within string `yaml:"within"`
    Between *NetworkPolicyRuleBetween `yaml:"between"`
    Action string `yaml:"action"`
}
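Not part of the commit: a sketch of how the Container fields above could be filtered down to usable endpoints, in the spirit of the provider's EnableServiceHealthFilter option. The accepted state and health-state values ("running", "healthy") are assumptions.

// healthyContainers keeps only containers that look routable.
func healthyContainers(containers []metadata.Container) []metadata.Container {
    var out []metadata.Container
    for _, c := range containers {
        running := c.State == "running"
        healthy := c.HealthState == "" || c.HealthState == "healthy" // assumed values
        if running && healthy && c.PrimaryIp != "" {
            out = append(out, c)
        }
    }
    return out
}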
19 vendor/github.com/rancher/go-rancher-metadata/metadata/utils.go generated vendored Normal file
@@ -0,0 +1,19 @@
package metadata

import (
    "time"
)

func testConnection(mdClient Client) error {
    var err error
    maxTime := 20 * time.Second

    for i := 1 * time.Second; i < maxTime; i *= time.Duration(2) {
        if _, err = mdClient.GetVersion(); err != nil {
            time.Sleep(i)
        } else {
            return nil
        }
    }
    return err
}
18 vendor/github.com/sirupsen/logrus/alt_exit.go generated vendored
|
@ -51,9 +51,9 @@ func Exit(code int) {
|
|||
os.Exit(code)
|
||||
}
|
||||
|
||||
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
|
||||
// all handlers. The handlers will also be invoked when any Fatal log entry is
|
||||
// made.
|
||||
// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
|
@ -62,3 +62,15 @@ func Exit(code int) {
|
|||
func RegisterExitHandler(handler func()) {
|
||||
handlers = append(handlers, handler)
|
||||
}
|
||||
|
||||
// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
|
||||
// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
|
||||
// any Fatal log entry is made.
|
||||
//
|
||||
// This method is useful when a caller wishes to use logrus to log a fatal
|
||||
// message but also needs to gracefully shutdown. An example usecase could be
|
||||
// closing database connections, or sending a alert that the application is
|
||||
// closing.
|
||||
func DeferExitHandler(handler func()) {
|
||||
handlers = append([]func(){handler}, handlers...)
|
||||
}
|
||||
|
|
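Not part of the diff: a sketch of the ordering difference between the existing RegisterExitHandler and the new DeferExitHandler added above.

package main

import (
    "fmt"

    log "github.com/sirupsen/logrus"
)

func main() {
    // Appended to the handler list.
    log.RegisterExitHandler(func() { fmt.Println("appended handler, runs second") })
    // Prepended, so it runs before handlers registered earlier.
    log.DeferExitHandler(func() { fmt.Println("deferred handler, runs first") })

    // Fatal runs every registered handler and then calls the logger's exit function.
    log.Fatal("shutting down")
}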
242 vendor/github.com/sirupsen/logrus/entry.go generated vendored
|
@ -2,13 +2,33 @@ package logrus
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var bufferPool *sync.Pool
|
||||
var (
|
||||
bufferPool *sync.Pool
|
||||
|
||||
// qualified package name, cached at first use
|
||||
logrusPackage string
|
||||
|
||||
// Positions in the call stack when tracing to report the calling method
|
||||
minimumCallerDepth int
|
||||
|
||||
// Used for caller information initialisation
|
||||
callerInitOnce sync.Once
|
||||
)
|
||||
|
||||
const (
|
||||
maximumCallerDepth int = 25
|
||||
knownLogrusFrames int = 4
|
||||
)
|
||||
|
||||
func init() {
|
||||
bufferPool = &sync.Pool{
|
||||
|
@ -16,15 +36,18 @@ func init() {
|
|||
return new(bytes.Buffer)
|
||||
},
|
||||
}
|
||||
|
||||
// start at the bottom of the stack before the package-name cache is primed
|
||||
minimumCallerDepth = 1
|
||||
}
|
||||
|
||||
// Defines the key when adding errors using WithError.
|
||||
var ErrorKey = "error"
|
||||
|
||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
||||
// passed around as much as you wish to avoid field duplication.
|
||||
// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
|
||||
// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
|
||||
// reused and passed around as much as you wish to avoid field duplication.
|
||||
type Entry struct {
|
||||
Logger *Logger
|
||||
|
||||
|
@ -34,22 +57,31 @@ type Entry struct {
|
|||
// Time at which the log entry was created
|
||||
Time time.Time
|
||||
|
||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
||||
// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
|
||||
// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
|
||||
Level Level
|
||||
|
||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
||||
// Calling method, with package name
|
||||
Caller *runtime.Frame
|
||||
|
||||
// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
|
||||
Message string
|
||||
|
||||
// When formatter is called in entry.log(), a Buffer may be set to entry
|
||||
Buffer *bytes.Buffer
|
||||
|
||||
// Contains the context set by the user. Useful for hook processing etc.
|
||||
Context context.Context
|
||||
|
||||
// err may contain a field formatting error
|
||||
err string
|
||||
}
|
||||
|
||||
func NewEntry(logger *Logger) *Entry {
|
||||
return &Entry{
|
||||
Logger: logger,
|
||||
// Default is five fields, give a little extra room
|
||||
Data: make(Fields, 5),
|
||||
// Default is three fields, plus one optional. Give a little extra room.
|
||||
Data: make(Fields, 6),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -69,6 +101,12 @@ func (entry *Entry) WithError(err error) *Entry {
|
|||
return entry.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// Add a context to the Entry.
|
||||
func (entry *Entry) WithContext(ctx context.Context) *Entry {
|
||||
entry.Context = ctx
|
||||
return entry
|
||||
}
|
||||
|
||||
// Add a single field to the Entry.
|
||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
||||
return entry.WithFields(Fields{key: value})
|
||||
|
@ -80,15 +118,88 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
|
|||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
fieldErr := entry.err
|
||||
for k, v := range fields {
|
||||
isErrField := false
|
||||
if t := reflect.TypeOf(v); t != nil {
|
||||
switch t.Kind() {
|
||||
case reflect.Func:
|
||||
isErrField = true
|
||||
case reflect.Ptr:
|
||||
isErrField = t.Elem().Kind() == reflect.Func
|
||||
}
|
||||
}
|
||||
if isErrField {
|
||||
tmp := fmt.Sprintf("can not add field %q", k)
|
||||
if fieldErr != "" {
|
||||
fieldErr = entry.err + ", " + tmp
|
||||
} else {
|
||||
fieldErr = tmp
|
||||
}
|
||||
} else {
|
||||
data[k] = v
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
|
||||
}
|
||||
return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
|
||||
}
|
||||
|
||||
// Overrides the time of the Entry.
|
||||
func (entry *Entry) WithTime(t time.Time) *Entry {
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
|
||||
return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
|
||||
}
|
||||
|
||||
// getPackageName reduces a fully qualified function name to the package name
|
||||
// There really ought to be to be a better way...
|
||||
func getPackageName(f string) string {
|
||||
for {
|
||||
lastPeriod := strings.LastIndex(f, ".")
|
||||
lastSlash := strings.LastIndex(f, "/")
|
||||
if lastPeriod > lastSlash {
|
||||
f = f[:lastPeriod]
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// getCaller retrieves the name of the first non-logrus calling function
|
||||
func getCaller() *runtime.Frame {
|
||||
|
||||
// cache this package's fully-qualified name
|
||||
callerInitOnce.Do(func() {
|
||||
pcs := make([]uintptr, 2)
|
||||
_ = runtime.Callers(0, pcs)
|
||||
logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
|
||||
|
||||
// now that we have the cache, we can skip a minimum count of known-logrus functions
|
||||
// XXX this is dubious, the number of frames may vary
|
||||
minimumCallerDepth = knownLogrusFrames
|
||||
})
|
||||
|
||||
// Restrict the lookback frames to avoid runaway lookups
|
||||
pcs := make([]uintptr, maximumCallerDepth)
|
||||
depth := runtime.Callers(minimumCallerDepth, pcs)
|
||||
frames := runtime.CallersFrames(pcs[:depth])
|
||||
|
||||
for f, again := frames.Next(); again; f, again = frames.Next() {
|
||||
pkg := getPackageName(f.Function)
|
||||
|
||||
// If the caller isn't part of this package, we're done
|
||||
if pkg != logrusPackage {
|
||||
return &f
|
||||
}
|
||||
}
|
||||
|
||||
// if we got here, we failed to find the caller's context
|
||||
return nil
|
||||
}
|
||||
|
||||
func (entry Entry) HasCaller() (has bool) {
|
||||
return entry.Logger != nil &&
|
||||
entry.Logger.ReportCaller &&
|
||||
entry.Caller != nil
|
||||
}
|
||||
|
||||
// This function is not declared with a pointer value because otherwise
|
||||
|
@ -107,6 +218,9 @@ func (entry Entry) log(level Level, msg string) {
|
|||
|
||||
entry.Level = level
|
||||
entry.Message = msg
|
||||
if entry.Logger.ReportCaller {
|
||||
entry.Caller = getCaller()
|
||||
}
|
||||
|
||||
entry.fireHooks()
|
||||
|
||||
|
@ -150,26 +264,30 @@ func (entry *Entry) write() {
|
|||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
||||
func (entry *Entry) Log(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.log(level, fmt.Sprint(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Trace(args ...interface{}) {
|
||||
entry.Log(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debug(args ...interface{}) {
|
||||
entry.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Print(args ...interface{}) {
|
||||
entry.Info(args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Info(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warn(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warning(args ...interface{}) {
|
||||
|
@ -177,37 +295,37 @@ func (entry *Entry) Warning(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Error(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatal(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
||||
}
|
||||
Exit(1)
|
||||
entry.Log(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panic(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
||||
}
|
||||
entry.Log(PanicLevel, args...)
|
||||
panic(fmt.Sprint(args...))
|
||||
}
|
||||
|
||||
// Entry Printf family functions
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(fmt.Sprintf(format, args...))
|
||||
func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, fmt.Sprintf(format, args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(fmt.Sprintf(format, args...))
|
||||
func (entry *Entry) Tracef(format string, args ...interface{}) {
|
||||
entry.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
||||
entry.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
||||
entry.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
||||
|
@ -215,9 +333,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
||||
|
@ -225,36 +341,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(fmt.Sprintf(format, args...))
|
||||
}
|
||||
Exit(1)
|
||||
entry.Logf(FatalLevel, format, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(fmt.Sprintf(format, args...))
|
||||
}
|
||||
entry.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
// Entry Println family functions
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(DebugLevel) {
|
||||
entry.Debug(entry.sprintlnn(args...))
|
||||
func (entry *Entry) Logln(level Level, args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(level) {
|
||||
entry.Log(level, entry.sprintlnn(args...))
|
||||
}
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(InfoLevel) {
|
||||
entry.Info(entry.sprintlnn(args...))
|
||||
func (entry *Entry) Traceln(args ...interface{}) {
|
||||
entry.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Debugln(args ...interface{}) {
|
||||
entry.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Infoln(args ...interface{}) {
|
||||
entry.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Println(args ...interface{}) {
|
||||
|
@ -262,9 +378,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Warnln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(WarnLevel) {
|
||||
entry.Warn(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Warningln(args ...interface{}) {
|
||||
|
@ -272,22 +386,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (entry *Entry) Errorln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry.Error(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(FatalLevel) {
|
||||
entry.Fatal(entry.sprintlnn(args...))
|
||||
}
|
||||
Exit(1)
|
||||
entry.Logln(FatalLevel, args...)
|
||||
entry.Logger.Exit(1)
|
||||
}
|
||||
|
||||
func (entry *Entry) Panicln(args ...interface{}) {
|
||||
if entry.Logger.IsLevelEnabled(PanicLevel) {
|
||||
entry.Panic(entry.sprintlnn(args...))
|
||||
}
|
||||
entry.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||
|
|
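Not part of the diff: WithFields now refuses function-valued fields and records the problem on the entry instead of letting a formatter fail later; a short sketch of the observable behaviour with the default text formatter.

package main

import log "github.com/sirupsen/logrus"

func main() {
    callback := func() {}

    // The "cb" field is dropped and the entry is emitted with an extra
    // logrus_error field describing the rejected key; "ok" is kept as usual.
    log.WithFields(log.Fields{"cb": callback, "ok": 1}).Info("func fields are skipped")
}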
27 vendor/github.com/sirupsen/logrus/exported.go generated vendored
|
@ -1,6 +1,7 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
@ -24,6 +25,12 @@ func SetFormatter(formatter Formatter) {
|
|||
std.SetFormatter(formatter)
|
||||
}
|
||||
|
||||
// SetReportCaller sets whether the standard logger will include the calling
|
||||
// method as a field.
|
||||
func SetReportCaller(include bool) {
|
||||
std.SetReportCaller(include)
|
||||
}
|
||||
|
||||
// SetLevel sets the standard logger level.
|
||||
func SetLevel(level Level) {
|
||||
std.SetLevel(level)
|
||||
|
@ -49,6 +56,11 @@ func WithError(err error) *Entry {
|
|||
return std.WithField(ErrorKey, err)
|
||||
}
|
||||
|
||||
// WithContext creates an entry from the standard logger and adds a context to it.
|
||||
func WithContext(ctx context.Context) *Entry {
|
||||
return std.WithContext(ctx)
|
||||
}
|
||||
|
||||
// WithField creates an entry from the standard logger and adds a field to
|
||||
// it. If you want multiple fields, use `WithFields`.
|
||||
//
|
||||
|
@ -77,6 +89,11 @@ func WithTime(t time.Time) *Entry {
|
|||
return std.WithTime(t)
|
||||
}
|
||||
|
||||
// Trace logs a message at level Trace on the standard logger.
|
||||
func Trace(args ...interface{}) {
|
||||
std.Trace(args...)
|
||||
}
|
||||
|
||||
// Debug logs a message at level Debug on the standard logger.
|
||||
func Debug(args ...interface{}) {
|
||||
std.Debug(args...)
|
||||
|
@ -117,6 +134,11 @@ func Fatal(args ...interface{}) {
|
|||
std.Fatal(args...)
|
||||
}
|
||||
|
||||
// Tracef logs a message at level Trace on the standard logger.
|
||||
func Tracef(format string, args ...interface{}) {
|
||||
std.Tracef(format, args...)
|
||||
}
|
||||
|
||||
// Debugf logs a message at level Debug on the standard logger.
|
||||
func Debugf(format string, args ...interface{}) {
|
||||
std.Debugf(format, args...)
|
||||
|
@ -157,6 +179,11 @@ func Fatalf(format string, args ...interface{}) {
|
|||
std.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
// Traceln logs a message at level Trace on the standard logger.
|
||||
func Traceln(args ...interface{}) {
|
||||
std.Traceln(args...)
|
||||
}
|
||||
|
||||
// Debugln logs a message at level Debug on the standard logger.
|
||||
func Debugln(args ...interface{}) {
|
||||
std.Debugln(args...)
|
||||
|
|
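Not part of the diff: a sketch combining the new package-level helpers added above, SetReportCaller, WithContext and the Trace family.

package main

import (
    "context"

    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetLevel(log.TraceLevel) // Trace sits below Debug and is off by default
    log.SetReportCaller(true)    // adds caller (func/file) information to each entry

    ctx := context.Background()
    log.WithContext(ctx).Tracef("polling every %d seconds", 15)
}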
33 vendor/github.com/sirupsen/logrus/formatter.go generated vendored
|
@ -2,7 +2,16 @@ package logrus
|
|||
|
||||
import "time"
|
||||
|
||||
const defaultTimestampFormat = time.RFC3339
|
||||
// Default key names for the default fields
|
||||
const (
|
||||
defaultTimestampFormat = time.RFC3339
|
||||
FieldKeyMsg = "msg"
|
||||
FieldKeyLevel = "level"
|
||||
FieldKeyTime = "time"
|
||||
FieldKeyLogrusError = "logrus_error"
|
||||
FieldKeyFunc = "func"
|
||||
FieldKeyFile = "file"
|
||||
)
|
||||
|
||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
||||
// `Entry`. It exposes all the fields, including the default ones:
|
||||
|
@ -18,7 +27,7 @@ type Formatter interface {
|
|||
Format(*Entry) ([]byte, error)
|
||||
}
|
||||
|
||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
||||
// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
|
||||
// dumping it. If this code wasn't there doing:
|
||||
//
|
||||
// logrus.WithField("level", 1).Info("hello")
|
||||
|
@ -30,7 +39,7 @@ type Formatter interface {
|
|||
//
|
||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
||||
// avoid code duplication between the two default formatters.
|
||||
func prefixFieldClashes(data Fields, fieldMap FieldMap) {
|
||||
func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
|
||||
timeKey := fieldMap.resolve(FieldKeyTime)
|
||||
if t, ok := data[timeKey]; ok {
|
||||
data["fields."+timeKey] = t
|
||||
|
@ -48,4 +57,22 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) {
|
|||
data["fields."+levelKey] = l
|
||||
delete(data, levelKey)
|
||||
}
|
||||
|
||||
logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
|
||||
if l, ok := data[logrusErrKey]; ok {
|
||||
data["fields."+logrusErrKey] = l
|
||||
delete(data, logrusErrKey)
|
||||
}
|
||||
|
||||
// If reportCaller is not set, 'func' will not conflict.
|
||||
if reportCaller {
|
||||
funcKey := fieldMap.resolve(FieldKeyFunc)
|
||||
if l, ok := data[funcKey]; ok {
|
||||
data["fields."+funcKey] = l
|
||||
}
|
||||
fileKey := fieldMap.resolve(FieldKeyFile)
|
||||
if l, ok := data[fileKey]; ok {
|
||||
data["fields."+fileKey] = l
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
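Not part of the diff: the clash-prefixing described in the comments above can be seen directly; a field named after one of the default keys is re-emitted under a "fields." prefix rather than overwriting the built-in value.

package main

import log "github.com/sirupsen/logrus"

func main() {
    // The user-supplied "level" is emitted as "fields.level" so the entry's real level is preserved.
    log.WithField("level", 1).Info("hello")
}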
37 vendor/github.com/sirupsen/logrus/json_formatter.go generated vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
type fieldKey string
|
||||
|
@ -11,13 +12,6 @@ type fieldKey string
|
|||
// FieldMap allows customization of the key names for default fields.
|
||||
type FieldMap map[fieldKey]string
|
||||
|
||||
// Default key names for the default fields
|
||||
const (
|
||||
FieldKeyMsg = "msg"
|
||||
FieldKeyLevel = "level"
|
||||
FieldKeyTime = "time"
|
||||
)
|
||||
|
||||
func (f FieldMap) resolve(key fieldKey) string {
|
||||
if k, ok := f[key]; ok {
|
||||
return k
|
||||
|
@ -44,17 +38,24 @@ type JSONFormatter struct {
|
|||
// FieldKeyTime: "@timestamp",
|
||||
// FieldKeyLevel: "@level",
|
||||
// FieldKeyMsg: "@message",
|
||||
// FieldKeyFunc: "@caller",
|
||||
// },
|
||||
// }
|
||||
FieldMap FieldMap
|
||||
|
||||
// CallerPrettyfier can be set by the user to modify the content
|
||||
// of the function and file keys in the json data when ReportCaller is
|
||||
// activated. If any of the returned value is the empty string the
|
||||
// corresponding key will be removed from json fields.
|
||||
CallerPrettyfier func(*runtime.Frame) (function string, file string)
|
||||
|
||||
// PrettyPrint will indent all json logs
|
||||
PrettyPrint bool
|
||||
}
|
||||
|
||||
// Format renders a single log entry
|
||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
data := make(Fields, len(entry.Data)+3)
|
||||
data := make(Fields, len(entry.Data)+4)
|
||||
for k, v := range entry.Data {
|
||||
switch v := v.(type) {
|
||||
case error:
|
||||
|
@ -72,18 +73,34 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
data = newData
|
||||
}
|
||||
|
||||
prefixFieldClashes(data, f.FieldMap)
|
||||
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
|
||||
|
||||
timestampFormat := f.TimestampFormat
|
||||
if timestampFormat == "" {
|
||||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
|
||||
if entry.err != "" {
|
||||
data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
|
||||
}
|
||||
if !f.DisableTimestamp {
|
||||
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
|
||||
}
|
||||
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
|
||||
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
|
||||
if entry.HasCaller() {
|
||||
funcVal := entry.Caller.Function
|
||||
fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
}
|
||||
if funcVal != "" {
|
||||
data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
|
||||
}
|
||||
if fileVal != "" {
|
||||
data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
|
||||
}
|
||||
}
|
||||
|
||||
var b *bytes.Buffer
|
||||
if entry.Buffer != nil {
|
||||
|
@ -97,7 +114,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
encoder.SetIndent("", " ")
|
||||
}
|
||||
if err := encoder.Encode(data); err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
|
||||
return b.Bytes(), nil
|
||||
|
|
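Not part of the diff: a sketch configuring the options JSONFormatter gains here, PrettyPrint and CallerPrettyfier, together with caller reporting.

package main

import (
    "fmt"
    "path/filepath"
    "runtime"

    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetReportCaller(true)
    log.SetFormatter(&log.JSONFormatter{
        PrettyPrint: true, // indent the JSON output
        // Shorten the caller information before it is serialised into the func/file keys.
        CallerPrettyfier: func(f *runtime.Frame) (string, string) {
            return f.Function, fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
        },
    })

    log.Info("rendered as indented JSON with func and file fields")
}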
184 vendor/github.com/sirupsen/logrus/logger.go generated vendored
|
@ -1,6 +1,7 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
|
@ -24,6 +25,10 @@ type Logger struct {
|
|||
// own that implements the `Formatter` interface, see the `README` or included
|
||||
// formatters for examples.
|
||||
Formatter Formatter
|
||||
|
||||
// Flag for whether to log caller info (off by default)
|
||||
ReportCaller bool
|
||||
|
||||
// The logging level the logger should log at. This is typically (and defaults
|
||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
// logged.
|
||||
|
@ -32,8 +37,12 @@ type Logger struct {
|
|||
mu MutexWrap
|
||||
// Reusable empty entry
|
||||
entryPool sync.Pool
|
||||
// Function to exit the application, defaults to `os.Exit()`
|
||||
ExitFunc exitFunc
|
||||
}
|
||||
|
||||
type exitFunc func(int)
|
||||
|
||||
type MutexWrap struct {
|
||||
lock sync.Mutex
|
||||
disabled bool
|
||||
|
@ -73,6 +82,8 @@ func New() *Logger {
|
|||
Formatter: new(TextFormatter),
|
||||
Hooks: make(LevelHooks),
|
||||
Level: InfoLevel,
|
||||
ExitFunc: os.Exit,
|
||||
ReportCaller: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -114,6 +125,13 @@ func (logger *Logger) WithError(err error) *Entry {
|
|||
return entry.WithError(err)
|
||||
}
|
||||
|
||||
// Add a context to the log entry.
|
||||
func (logger *Logger) WithContext(ctx context.Context) *Entry {
|
||||
entry := logger.newEntry()
|
||||
defer logger.releaseEntry(entry)
|
||||
return entry.WithContext(ctx)
|
||||
}
|
||||
|
||||
// Overrides the time of the log entry.
|
||||
func (logger *Logger) WithTime(t time.Time) *Entry {
|
||||
entry := logger.newEntry()
|
||||
|
@ -121,20 +139,24 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
|
|||
return entry.WithTime(t)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugf(format, args...)
|
||||
entry.Logf(level, format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Infof(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
func (logger *Logger) Tracef(format string, args ...interface{}) {
|
||||
logger.Logf(TraceLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
logger.Logf(DebugLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
logger.Logf(InfoLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
|
@ -144,123 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
|
|||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(WarnLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logf(ErrorLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalf(format, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
logger.Logf(FatalLevel, format, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
logger.Logf(PanicLevel, format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Log(level Level, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicf(format, args...)
|
||||
entry.Log(level, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Trace(args ...interface{}) {
|
||||
logger.Log(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debug(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
entry := logger.newEntry()
|
||||
entry.Info(args...)
|
||||
entry.Print(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warn(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Error(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Log(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatal(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
logger.Log(FatalLevel, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
logger.Log(PanicLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Logln(level Level, args ...interface{}) {
|
||||
if logger.IsLevelEnabled(level) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panic(args...)
|
||||
entry.Logln(level, args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
}
|
||||
|
||||
func (logger *Logger) Traceln(args ...interface{}) {
|
||||
logger.Logln(TraceLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(DebugLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Debugln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(DebugLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(InfoLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Infoln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(InfoLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
|
@ -270,44 +260,32 @@ func (logger *Logger) Println(args ...interface{}) {
|
|||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(WarnLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(WarnLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Warnln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(ErrorLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Errorln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
logger.Logln(ErrorLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(FatalLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Fatalln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
}
|
||||
Exit(1)
|
||||
logger.Logln(FatalLevel, args...)
|
||||
logger.Exit(1)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
if logger.IsLevelEnabled(PanicLevel) {
|
||||
entry := logger.newEntry()
|
||||
entry.Panicln(args...)
|
||||
logger.releaseEntry(entry)
|
||||
logger.Logln(PanicLevel, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Exit(code int) {
|
||||
runHandlers()
|
||||
if logger.ExitFunc == nil {
|
||||
logger.ExitFunc = os.Exit
|
||||
}
|
||||
logger.ExitFunc(code)
|
||||
}
|
||||
|
||||
//When file is opened with appending mode, it's safe to
|
||||
|
@ -357,6 +335,12 @@ func (logger *Logger) SetOutput(output io.Writer) {
|
|||
logger.Out = output
|
||||
}
|
||||
|
||||
func (logger *Logger) SetReportCaller(reportCaller bool) {
|
||||
logger.mu.Lock()
|
||||
defer logger.mu.Unlock()
|
||||
logger.ReportCaller = reportCaller
|
||||
}
|
||||
|
||||
// ReplaceHooks replaces the logger hooks and returns the old ones
|
||||
func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
|
||||
logger.mu.Lock()
|
||||
|
|
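Not part of the diff: Fatal now goes through logger.Exit and the exported ExitFunc field, which makes fatal paths testable; a sketch.

package main

import (
    "fmt"

    log "github.com/sirupsen/logrus"
)

func main() {
    logger := log.New()

    // Replace the exit behaviour so Fatal no longer kills the process.
    exitCode := 0
    logger.ExitFunc = func(code int) { exitCode = code }

    logger.Fatal("boom")
    fmt.Println("recorded exit code:", exitCode)
}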
68 vendor/github.com/sirupsen/logrus/logrus.go generated vendored
|
@ -14,23 +14,12 @@ type Level uint32
|
|||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case WarnLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
case PanicLevel:
|
||||
return "panic"
|
||||
}
|
||||
|
||||
if b, err := level.MarshalText(); err == nil {
|
||||
return string(b)
|
||||
} else {
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
func ParseLevel(lvl string) (Level, error) {
|
||||
|
@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) {
|
|||
return InfoLevel, nil
|
||||
case "debug":
|
||||
return DebugLevel, nil
|
||||
case "trace":
|
||||
return TraceLevel, nil
|
||||
}
|
||||
|
||||
var l Level
|
||||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (level *Level) UnmarshalText(text []byte) error {
|
||||
l, err := ParseLevel(string(text))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*level = Level(l)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (level Level) MarshalText() ([]byte, error) {
|
||||
switch level {
|
||||
case TraceLevel:
|
||||
return []byte("trace"), nil
|
||||
case DebugLevel:
|
||||
return []byte("debug"), nil
|
||||
case InfoLevel:
|
||||
return []byte("info"), nil
|
||||
case WarnLevel:
|
||||
return []byte("warning"), nil
|
||||
case ErrorLevel:
|
||||
return []byte("error"), nil
|
||||
case FatalLevel:
|
||||
return []byte("fatal"), nil
|
||||
case PanicLevel:
|
||||
return []byte("panic"), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("not a valid logrus level %d", level)
|
||||
}
|
||||
|
||||
// A constant exposing all logging levels
|
||||
var AllLevels = []Level{
|
||||
PanicLevel,
|
||||
|
@ -61,6 +85,7 @@ var AllLevels = []Level{
|
|||
WarnLevel,
|
||||
InfoLevel,
|
||||
DebugLevel,
|
||||
TraceLevel,
|
||||
}
|
||||
|
||||
// These are the different logging levels. You can set the logging level to log
|
||||
|
@ -69,7 +94,7 @@ const (
|
|||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||
// message passed to Debug, Info, ...
|
||||
PanicLevel Level = iota
|
||||
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||
// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
|
||||
// logging level is set to Panic.
|
||||
FatalLevel
|
||||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||
|
@ -82,6 +107,8 @@ const (
|
|||
InfoLevel
|
||||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||
DebugLevel
|
||||
// TraceLevel level. Designates finer-grained informational events than the Debug.
|
||||
TraceLevel
|
||||
)
|
||||
|
||||
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||
|
@ -148,3 +175,12 @@ type FieldLogger interface {
|
|||
// IsFatalEnabled() bool
|
||||
// IsPanicEnabled() bool
|
||||
}
|
||||
|
||||
// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
|
||||
// here for consistancy. Do not use. Use Logger or Entry instead.
|
||||
type Ext1FieldLogger interface {
|
||||
FieldLogger
|
||||
Tracef(format string, args ...interface{})
|
||||
Trace(args ...interface{})
|
||||
Traceln(args ...interface{})
|
||||
}
|
||||
|
|
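Not part of the diff: the new TraceLevel round-trips through ParseLevel and the MarshalText/UnmarshalText implementations added above.

package main

import (
    "fmt"

    log "github.com/sirupsen/logrus"
)

func main() {
    lvl, err := log.ParseLevel("trace")
    if err != nil {
        panic(err)
    }

    text, _ := lvl.MarshalText()
    fmt.Println(lvl, string(text)) // prints: trace trace
}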
13 vendor/github.com/sirupsen/logrus/terminal_appengine.go generated vendored
@@ -1,13 +0,0 @@
// Based on ssh/terminal:
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appengine

package logrus

import "io"

func initTerminal(w io.Writer) {
}
17 vendor/github.com/sirupsen/logrus/terminal_bsd.go generated vendored
@@ -1,17 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine,!js

package logrus

import (
    "io"

    "golang.org/x/sys/unix"
)

const ioctlReadTermios = unix.TIOCGETA

type Termios unix.Termios

func initTerminal(w io.Writer) {
}
9 vendor/github.com/sirupsen/logrus/terminal_check_aix.go generated vendored Normal file
@@ -0,0 +1,9 @@
// +build !appengine,!js,!windows,aix

package logrus

import "io"

func checkIfTerminal(w io.Writer) bool {
    return false
}
2 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go generated vendored
@@ -1,4 +1,4 @@
// +build !appengine,!js,!windows
// +build !appengine,!js,!windows,!aix

package logrus
21 vendor/github.com/sirupsen/logrus/terminal_linux.go generated vendored
@@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!js

package logrus

import (
    "io"

    "golang.org/x/sys/unix"
)

const ioctlReadTermios = unix.TCGETS

type Termios unix.Termios

func initTerminal(w io.Writer) {
}
8 vendor/github.com/sirupsen/logrus/terminal_notwindows.go generated vendored Normal file
@@ -0,0 +1,8 @@
// +build !windows

package logrus

import "io"

func initTerminal(w io.Writer) {
}
87 vendor/github.com/sirupsen/logrus/text_formatter.go generated vendored
|
@ -4,6 +4,7 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@ -11,18 +12,13 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 36
|
||||
gray = 37
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
emptyFieldMap FieldMap
|
||||
)
|
||||
var baseTimestamp time.Time
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
|
@ -76,6 +72,12 @@ type TextFormatter struct {
|
|||
// FieldKeyMsg: "@message"}}
|
||||
FieldMap FieldMap
|
||||
|
||||
// CallerPrettyfier can be set by the user to modify the content
|
||||
// of the function and file keys in the json data when ReportCaller is
|
||||
// activated. If any of the returned value is the empty string the
|
||||
// corresponding key will be removed from json fields.
|
||||
CallerPrettyfier func(*runtime.Frame) (function string, file string)
|
||||
|
||||
terminalInitOnce sync.Once
|
||||
}
|
||||
|
||||
|
@ -90,7 +92,7 @@ func (f *TextFormatter) init(entry *Entry) {
|
|||
}
|
||||
|
||||
func (f *TextFormatter) isColored() bool {
|
||||
isColored := f.ForceColors || f.isTerminal
|
||||
isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
|
||||
|
||||
if f.EnvironmentOverrideColors {
|
||||
if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
|
||||
|
@ -107,14 +109,19 @@ func (f *TextFormatter) isColored() bool {
|
|||
|
||||
// Format renders a single log entry
|
||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
prefixFieldClashes(entry.Data, f.FieldMap)
|
||||
|
||||
keys := make([]string, 0, len(entry.Data))
|
||||
for k := range entry.Data {
|
||||
data := make(Fields)
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
|
||||
keys := make([]string, 0, len(data))
|
||||
for k := range data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
fixedKeys := make([]string, 0, 3+len(entry.Data))
|
||||
var funcVal, fileVal string
|
||||
|
||||
fixedKeys := make([]string, 0, 4+len(data))
|
||||
if !f.DisableTimestamp {
|
||||
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
|
||||
}
|
||||
|
@ -122,6 +129,19 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
if entry.Message != "" {
|
||||
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
|
||||
}
|
||||
if entry.err != "" {
|
||||
fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
|
||||
}
|
||||
if entry.HasCaller() {
|
||||
fixedKeys = append(fixedKeys,
|
||||
f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
} else {
|
||||
funcVal = entry.Caller.Function
|
||||
fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
}
|
||||
}
|
||||
|
||||
if !f.DisableSorting {
|
||||
if f.SortingFunc == nil {
|
||||
|
@ -153,19 +173,26 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
timestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if f.isColored() {
|
||||
f.printColored(b, entry, keys, timestampFormat)
|
||||
f.printColored(b, entry, keys, data, timestampFormat)
|
||||
} else {
|
||||
|
||||
for _, key := range fixedKeys {
|
||||
var value interface{}
|
||||
switch key {
|
||||
case f.FieldMap.resolve(FieldKeyTime):
|
||||
switch {
|
||||
case key == f.FieldMap.resolve(FieldKeyTime):
|
||||
value = entry.Time.Format(timestampFormat)
|
||||
case f.FieldMap.resolve(FieldKeyLevel):
|
||||
case key == f.FieldMap.resolve(FieldKeyLevel):
|
||||
value = entry.Level.String()
|
||||
case f.FieldMap.resolve(FieldKeyMsg):
|
||||
case key == f.FieldMap.resolve(FieldKeyMsg):
|
||||
value = entry.Message
|
||||
case key == f.FieldMap.resolve(FieldKeyLogrusError):
|
||||
value = entry.err
|
||||
case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
|
||||
value = funcVal
|
||||
case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
|
||||
value = fileVal
|
||||
default:
|
||||
value = entry.Data[key]
|
||||
value = data[key]
|
||||
}
|
||||
f.appendKeyValue(b, key, value)
|
||||
}
|
||||
|
@ -175,10 +202,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case DebugLevel:
|
||||
case DebugLevel, TraceLevel:
|
||||
levelColor = gray
|
||||
case WarnLevel:
|
||||
levelColor = yellow
|
||||
|
@ -197,15 +224,27 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
|
|||
// the behavior of logrus text_formatter the same as the stdlib log package
|
||||
entry.Message = strings.TrimSuffix(entry.Message, "\n")
|
||||
|
||||
caller := ""
|
||||
|
||||
if entry.HasCaller() {
|
||||
funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
|
||||
fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
|
||||
|
||||
if f.CallerPrettyfier != nil {
|
||||
funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
|
||||
}
|
||||
caller = fileVal + " " + funcVal
|
||||
}
|
||||
|
||||
if f.DisableTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
|
||||
} else if !f.FullTimestamp {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
|
||||
} else {
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
|
||||
}
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
v := data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
|
||||
f.appendValue(b, v)
|
||||
}
|
||||
|
|
2 vendor/github.com/sirupsen/logrus/writer.go generated vendored
@@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
    var printFunc func(args ...interface{})

    switch level {
    case TraceLevel:
        printFunc = entry.Trace
    case DebugLevel:
        printFunc = entry.Debug
    case InfoLevel:
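Not part of the diff: WriterLevel now understands TraceLevel, so libraries that only accept an io.Writer can be routed to trace entries; a sketch (the short sleep only gives the pipe's reader goroutine time to flush in this toy program).

package main

import (
    stdlog "log"
    "time"

    log "github.com/sirupsen/logrus"
)

func main() {
    log.SetLevel(log.TraceLevel)

    w := log.StandardLogger().WriterLevel(log.TraceLevel)
    defer w.Close()

    stdlog.New(w, "", 0).Println("forwarded through the trace writer")
    time.Sleep(100 * time.Millisecond)
}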