Merge tag 'v1.6.0-rc6' into master

Fernandez Ludovic 2018-04-18 10:13:22 +02:00
commit e6ce61fdf0
48 changed files with 1115 additions and 472 deletions


@ -1,5 +1,24 @@
# Change Log
## [v1.6.0-rc6](https://github.com/containous/traefik/tree/v1.6.0-rc6) (2018-04-17)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc5...v1.6.0-rc6)
**Enhancements:**
- **[acme]** Create backup file during migration from ACME V1 to ACME V2 ([#3191](https://github.com/containous/traefik/pull/3191) by [nmengin](https://github.com/nmengin))
- **[servicefabric]** Use shared label system ([#3197](https://github.com/containous/traefik/pull/3197) by [ldez](https://github.com/ldez))
**Bug fixes:**
- **[docker]** Fix multiple frontends with docker-compose --scale ([#3190](https://github.com/containous/traefik/pull/3190) by [jbdoumenjou](https://github.com/jbdoumenjou))
- **[metrics]** Fix duplicated tags in InfluxDB ([#3189](https://github.com/containous/traefik/pull/3189) by [mmatur](https://github.com/mmatur))
- **[middleware,tracing]** Fix nil value when tracing is enabled ([#3192](https://github.com/containous/traefik/pull/3192) by [mmatur](https://github.com/mmatur))
- **[middleware]** Fix panic in atomic on ARM and x86-32 platforms ([#3195](https://github.com/containous/traefik/pull/3195) by [mmatur](https://github.com/mmatur))
- **[middleware]** Redirect to HTTPS first before basic auth if header redirect (secure) is set ([#3187](https://github.com/containous/traefik/pull/3187) by [SantoDE](https://github.com/SantoDE))
- **[servicefabric]** Fix backend name for stateful service and more. ([#3183](https://github.com/containous/traefik/pull/3183) by [ldez](https://github.com/ldez))
- Add missing argument in log. ([#3188](https://github.com/containous/traefik/pull/3188) by [chemidy](https://github.com/chemidy))
**Documentation:**
- **[provider]** Fix template version documentation. ([#3184](https://github.com/containous/traefik/pull/3184) by [ldez](https://github.com/ldez))
## [v1.6.0-rc5](https://github.com/containous/traefik/tree/v1.6.0-rc5) (2018-04-12)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc4...v1.6.0-rc5)

Gopkg.lock

@ -263,8 +263,8 @@
[[projects]]
name = "github.com/containous/traefik-extra-service-fabric"
packages = ["."]
revision = "503022efdc178146d598911092af75690510a80c"
version = "v1.1.3"
revision = "2889df8d4f84315e6e527588554ed0ce9d062305"
version = "v1.1.5"
[[projects]]
name = "github.com/coreos/bbolt"
@ -569,8 +569,8 @@
"metrics/statsd",
"util/conn"
]
revision = "f66b0e13579bfc5a48b9e2a94b1209c107ea1f41"
version = "v0.3.0"
revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e"
version = "v0.7.0"
[[projects]]
name = "github.com/go-logfmt/logfmt"
@ -761,7 +761,6 @@
version = "v1.3.7"
[[projects]]
branch = "master"
name = "github.com/jjcollinge/servicefabric"
packages = ["."]
revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe"
@ -1675,6 +1674,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "c441208e9bf330e85e2939b383515f58a4957286960b43c444e6f512d1ff94ee"
inputs-digest = "c7d91203842be1915ca08a31917a079489bff7ffc6f2e494330e9556b4730a06"
solver-name = "gps-cdcl"
solver-version = 1


@ -66,7 +66,7 @@
[[constraint]]
name = "github.com/containous/traefik-extra-service-fabric"
version = "1.1.3"
version = "1.1.5"
[[constraint]]
name = "github.com/coreos/go-systemd"
@ -97,7 +97,7 @@
[[constraint]]
name = "github.com/go-kit/kit"
version = "0.3.0"
version = "0.7.0"
[[constraint]]
branch = "master"


@ -46,38 +46,42 @@ func (s *LocalStore) Get() (*Account, error) {
if err := json.Unmarshal(file, &account); err != nil {
return nil, err
}
// Check if ACME Account is in ACME V1 format
if account != nil && account.Registration != nil {
isOldRegistration, err := regexp.MatchString(acme.RegistrationURLPathV1Regexp, account.Registration.URI)
if err != nil {
return nil, err
}
if isOldRegistration {
account.Email = ""
account.Registration = nil
account.PrivateKey = nil
}
}
}
return account, nil
}
// RemoveAccountV1Values removes ACME account V1 values
func RemoveAccountV1Values(account *Account) error {
// Check if ACME Account is in ACME V1 format
if account != nil && account.Registration != nil {
isOldRegistration, err := regexp.MatchString(acme.RegistrationURLPathV1Regexp, account.Registration.URI)
if err != nil {
return err
}
if isOldRegistration {
account.Email = ""
account.Registration = nil
account.PrivateKey = nil
}
}
return nil
}
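A V1 account is recognized by the shape of its registration URI. A minimal sketch of that detection, assuming a V1-style URL pattern (the real pattern is the `acme.RegistrationURLPathV1Regexp` constant in the `provider/acme` package, and the sample URI below is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// Assumed stand-in for acme.RegistrationURLPathV1Regexp: ACME V1 registration
// URIs end in /acme/reg/<id>, while ACME V2 account URLs do not.
const registrationURLPathV1Regexp = `^.*/acme/reg/\d+$`

func main() {
	uri := "https://acme-v01.api.letsencrypt.org/acme/reg/123456" // sample V1 URI
	isOld, err := regexp.MatchString(registrationURLPathV1Regexp, uri)
	if err != nil {
		panic(err)
	}
	fmt.Println(isOld) // true: email, registration, and private key are reset
}
```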
// ConvertToNewFormat converts old acme.json format to the new one and store the result into the file (used for the backward compatibility)
func ConvertToNewFormat(fileName string) {
localStore := acme.NewLocalStore(fileName)
storeAccount, err := localStore.GetAccount()
if err != nil {
log.Warnf("Failed to read new account, ACME data conversion is not available : %v", err)
log.Errorf("Failed to read new account, ACME data conversion is not available : %v", err)
return
}
storeCertificates, err := localStore.GetCertificates()
if err != nil {
log.Warnf("Failed to read new certificates, ACME data conversion is not available : %v", err)
log.Errorf("Failed to read new certificates, ACME data conversion is not available : %v", err)
return
}
@ -86,13 +90,25 @@ func ConvertToNewFormat(fileName string) {
account, err := localStore.Get()
if err != nil {
log.Warnf("Failed to read old account, ACME data conversion is not available : %v", err)
log.Errorf("Failed to read old account, ACME data conversion is not available : %v", err)
return
}
// Convert ACME data from old to new format
newAccount := &acme.Account{}
if account != nil && len(account.Email) > 0 {
err = backupACMEFile(fileName, account)
if err != nil {
log.Errorf("Unable to create a backup for the V1 formatted ACME file: %s", err.Error())
return
}
err = RemoveAccountV1Values(account)
if err != nil {
log.Errorf("Unable to remove ACME Account V1 values: %s", err.Error())
return
}
newAccount = &acme.Account{
PrivateKey: account.PrivateKey,
Registration: account.Registration,
@ -107,8 +123,8 @@ func ConvertToNewFormat(fileName string) {
Domain: cert.Domains,
})
}
// If account is in the old format, storeCertificates is nil or empty
// and has to be initialized
// If account is in the old format, storeCertificates is nil or empty and has to be initialized
storeCertificates = newCertificates
}
@ -119,7 +135,16 @@ func ConvertToNewFormat(fileName string) {
}
}
// FromNewToOldFormat converts new acme.json format to the old one (used for the backward compatibility)
func backupACMEFile(originalFileName string, account interface{}) error {
// write account to file
data, err := json.MarshalIndent(account, "", " ")
if err != nil {
return err
}
return ioutil.WriteFile(originalFileName+".bak", data, 0600)
}
// FromNewToOldFormat converts new acme account to the old one (used for the backward compatibility)
func FromNewToOldFormat(fileName string) (*Account, error) {
localStore := acme.NewLocalStore(fileName)


@ -134,10 +134,16 @@ func migrateACMEData(fileName string) (*acme.Account, error) {
if accountFromNewFormat == nil {
// convert ACME json file to KV store (used for backward compatibility)
localStore := acme.NewLocalStore(fileName)
account, err = localStore.Get()
if err != nil {
return nil, err
}
err = acme.RemoveAccountV1Values(account)
if err != nil {
return nil, err
}
} else {
account = accountFromNewFormat
}


@ -11,6 +11,8 @@ import (
"github.com/containous/traefik/api"
"github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares/tracing"
"github.com/containous/traefik/middlewares/tracing/jaeger"
"github.com/containous/traefik/middlewares/tracing/zipkin"
"github.com/containous/traefik/ping"
acmeprovider "github.com/containous/traefik/provider/acme"
"github.com/containous/traefik/provider/boltdb"
@ -313,6 +315,43 @@ func (gc *GlobalConfiguration) SetEffectiveConfiguration(configFile string) {
}
gc.initACMEProvider()
gc.initTracing()
}
func (gc *GlobalConfiguration) initTracing() {
if gc.Tracing != nil {
switch gc.Tracing.Backend {
case jaeger.Name:
if gc.Tracing.Jaeger == nil {
gc.Tracing.Jaeger = &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
}
}
if gc.Tracing.Zipkin != nil {
log.Warn("Zipkin configuration will be ignored")
gc.Tracing.Zipkin = nil
}
case zipkin.Name:
if gc.Tracing.Zipkin == nil {
gc.Tracing.Zipkin = &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
}
}
if gc.Tracing.Jaeger != nil {
log.Warn("Jaeger configuration will be ignored")
gc.Tracing.Jaeger = nil
}
default:
log.Warnf("Unknown tracer %q", gc.Tracing.Backend)
return
}
}
}
func (gc *GlobalConfiguration) initACMEProvider() {


@ -5,14 +5,18 @@ import (
"time"
"github.com/containous/flaeg"
"github.com/containous/traefik/middlewares/tracing"
"github.com/containous/traefik/middlewares/tracing/jaeger"
"github.com/containous/traefik/middlewares/tracing/zipkin"
"github.com/containous/traefik/provider"
"github.com/containous/traefik/provider/file"
"github.com/stretchr/testify/assert"
)
const defaultConfigFile = "traefik.toml"
func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
tests := []struct {
testCases := []struct {
desc string
legacyGraceTimeout time.Duration
lifeCycleGraceTimeout time.Duration
@ -37,10 +41,11 @@ func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
},
}
for _, test := range tests {
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
gc := &GlobalConfiguration{
GraceTimeOut: flaeg.Duration(test.legacyGraceTimeout),
}
@ -52,17 +57,14 @@ func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
gc.SetEffectiveConfiguration(defaultConfigFile)
gotGraceTimeout := time.Duration(gc.LifeCycle.GraceTimeOut)
if gotGraceTimeout != test.wantGraceTimeout {
t.Fatalf("got effective grace timeout %d, want %d", gotGraceTimeout, test.wantGraceTimeout)
}
assert.Equal(t, test.wantGraceTimeout, time.Duration(gc.LifeCycle.GraceTimeOut))
})
}
}
func TestSetEffectiveConfigurationFileProviderFilename(t *testing.T) {
tests := []struct {
testCases := []struct {
desc string
fileProvider *file.Provider
wantFileProviderFilename string
@ -84,20 +86,128 @@ func TestSetEffectiveConfigurationFileProviderFilename(t *testing.T) {
},
}
for _, test := range tests {
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
gc := &GlobalConfiguration{
File: test.fileProvider,
}
gc.SetEffectiveConfiguration(defaultConfigFile)
gotFileProviderFilename := gc.File.Filename
if gotFileProviderFilename != test.wantFileProviderFilename {
t.Fatalf("got file provider file name %q, want %q", gotFileProviderFilename, test.wantFileProviderFilename)
}
assert.Equal(t, test.wantFileProviderFilename, gc.File.Filename)
})
}
}
func TestSetEffectiveConfigurationTracing(t *testing.T) {
testCases := []struct {
desc string
tracing *tracing.Tracing
expected *tracing.Tracing
}{
{
desc: "no tracing configuration",
tracing: &tracing.Tracing{},
expected: &tracing.Tracing{},
},
{
desc: "tracing bad backend name",
tracing: &tracing.Tracing{
Backend: "powpow",
},
expected: &tracing.Tracing{
Backend: "powpow",
},
},
{
desc: "tracing jaeger backend name",
tracing: &tracing.Tracing{
Backend: "jaeger",
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
},
},
expected: &tracing.Tracing{
Backend: "jaeger",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
Zipkin: nil,
},
},
{
desc: "tracing zipkin backend name",
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
},
expected: &tracing.Tracing{
Backend: "zipkin",
Jaeger: nil,
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
},
},
},
{
desc: "tracing zipkin backend name value override",
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://powpow:9411/api/v1/spans",
SameSpan: true,
ID128Bit: true,
Debug: true,
},
},
expected: &tracing.Tracing{
Backend: "zipkin",
Jaeger: nil,
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://powpow:9411/api/v1/spans",
SameSpan: true,
ID128Bit: true,
Debug: true,
},
},
},
}
for _, test := range testCases {
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
gc := &GlobalConfiguration{
Tracing: test.tracing,
}
gc.SetEffectiveConfiguration(defaultConfigFile)
assert.Equal(t, test.expected, gc.Tracing)
})
}
}


@ -543,3 +543,14 @@ Do not hesitate to complete it.
| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | Not tested yet |
| [Route 53](https://aws.amazon.com/route53/) | `route53` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | Not tested yet |
## ACME V2 migration
During the migration from ACME V1 to ACME V2 with a storage file, a backup containing the ACME V1 data is created.
To name the backup file, Træfik concatenates the value of the `acme.storage` option and the suffix `.bak`.
For example: if the `acme.storage` value is `/etc/traefik/acme/acme.json`, the backup file will be named `/etc/traefik/acme/acme.json.bak`.
!!! note
    When Træfik is launched in a container, do not forget to mount the parent folder as a volume so that the backup file is available on the host.
    Otherwise, the backup file will be deleted when the container is stopped, and Træfik will not generate it again.


@ -73,7 +73,7 @@ prefix = "traefik"
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
```
This backend will create routes matching on hostname based on the service name used in Consul.


@ -46,7 +46,7 @@ watch = true
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
# Expose containers by default in Traefik.
# If set to false, containers that don't have `traefik.enable=true` will be ignored.
@ -139,7 +139,7 @@ swarmMode = true
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
# Expose services by default in Traefik.
#


@ -92,7 +92,7 @@ secretAccessKey = "123"
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
```
If `accessKeyID`/`secretAccessKey` is not given credentials will be resolved in the following order:


@ -52,7 +52,7 @@ domain = "marathon.localhost"
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
# Expose Marathon apps by default in Traefik.
#


@ -55,7 +55,7 @@ domain = "mesos.localhost"
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
# TLS client configuration. https://golang.org/pkg/crypto/tls/#Config
#


@ -61,7 +61,7 @@ enableServiceHealthFilter = true
# - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used)
#
# templateVersion = "2"
# templateVersion = 2
```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).


@ -0,0 +1,78 @@
package integration
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/containous/traefik/integration/try"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/go-check/check"
checker "github.com/vdemeester/shakers"
)
const (
composeProject = "minimal"
)
// Docker test suites
type DockerComposeSuite struct {
BaseSuite
}
func (s *DockerComposeSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, composeProject)
s.composeProject.Start(c)
}
func (s *DockerComposeSuite) TearDownSuite(c *check.C) {
// shutdown and delete compose project
if s.composeProject != nil {
s.composeProject.Stop(c)
}
}
func (s *DockerComposeSuite) TestComposeScale(c *check.C) {
var serviceCount = 2
var composeService = "whoami1"
s.composeProject.Scale(c, composeService, serviceCount)
file := s.adaptFileForHost(c, "fixtures/docker/simple.toml")
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
req := testhelpers.MustNewRequest(http.MethodGet, "http://127.0.0.1:8000/whoami", nil)
req.Host = "my.super.host"
_, err = try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
resp, err := http.Get("http://127.0.0.1:8080/api/providers/docker")
c.Assert(err, checker.IsNil)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
var provider types.Configuration
c.Assert(json.Unmarshal(body, &provider), checker.IsNil)
// check that we have only one backend with n servers
c.Assert(provider.Backends, checker.HasLen, 1)
myBackend := provider.Backends["backend-"+composeService+"-integrationtest"+composeProject]
c.Assert(myBackend, checker.NotNil)
c.Assert(myBackend.Servers, checker.HasLen, serviceCount)
// check that we have only one frontend
c.Assert(provider.Frontends, checker.HasLen, 1)
}


@ -99,7 +99,7 @@ func (s *DockerSuite) TestSimpleConfiguration(c *check.C) {
defer cmd.Process.Kill()
// TODO validate : run on 80
// Expected a 404 as we did not comfigure anything
// Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil)
}


@ -41,6 +41,7 @@ func init() {
check.Suite(&ConstraintSuite{})
check.Suite(&ConsulCatalogSuite{})
check.Suite(&ConsulSuite{})
check.Suite(&DockerComposeSuite{})
check.Suite(&DockerSuite{})
check.Suite(&DynamoDBSuite{})
check.Suite(&EtcdSuite{})


@ -0,0 +1,4 @@
whoami1:
image: emilevauge/whoami
labels:
- traefik.frontend.rule=PathPrefix:/whoami


@ -24,21 +24,21 @@ func TestInfluxDB(t *testing.T) {
}
expectedBackend := []string{
`(traefik\.backend\.requests\.total,code=200,method=GET,service=test count=1) [\d]{19}`,
`(traefik\.backend\.requests\.total,code=404,method=GET,service=test count=1) [\d]{19}`,
`(traefik\.backend\.request\.duration(?:,backend=test)?,code=200,method=GET,service=test(?:,url=http://127.0.0.1)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,service=test count=2) [\d]{19}`,
`(traefik\.backend\.requests\.total,backend=test,code=200,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.requests\.total,backend=test,code=404,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.request\.duration,backend=test,code=200 p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,backend=test count=2) [\d]{19}`,
`(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
}
msgBackend := udp.ReceiveString(t, func() {
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1)
influxDBRegistry.BackendReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendReqDurationHistogram().With("backend", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
influxDBRegistry.ConfigReloadsCounter().Add(1)
influxDBRegistry.ConfigReloadsFailureCounter().Add(1)
influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1)
@ -47,9 +47,9 @@ func TestInfluxDB(t *testing.T) {
assertMessage(t, msgBackend, expectedBackend)
expectedEntrypoint := []string{
`(traefik\.entrypoint\.requests\.total(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
`(traefik\.entrypoint\.request\.duration(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.entrypoint\.connections\.open(?:[a-z=0-9A-Z,]+)?,entrypoint=test,(?:[a-z=0-9A-Z,:.//]+)? value=1) [\d]{19}`,
`(traefik\.entrypoint\.requests\.total,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
`(traefik\.entrypoint\.request\.duration(?:,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.entrypoint\.connections\.open,entrypoint=test value=1) [\d]{19}`,
}
msgEntrypoint := udp.ReceiveString(t, func() {


@ -335,6 +335,12 @@ func (g *gauge) With(labelValues ...string) metrics.Gauge {
}
}
func (g *gauge) Add(delta float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Add(delta)
g.collectors <- newCollector(g.name, g.labelNamesValues, collector)
}
func (g *gauge) Set(value float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Set(value)


@ -41,11 +41,13 @@ func NewBackendMetricsMiddleware(registry metrics.Registry, backendName string)
}
type metricsMiddleware struct {
// Important: Since this int64 field is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platform
// See: https://golang.org/pkg/sync/atomic/ for more information
openConns int64
reqsCounter gokitmetrics.Counter
reqDurationHistogram gokitmetrics.Histogram
openConnsGauge gokitmetrics.Gauge
baseLabels []string
openConns int64
}
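The comment above refers to a documented quirk of sync/atomic: on ARM and x86-32, 64-bit atomic operations require 64-bit alignment, and only the first word of an allocated struct is guaranteed to be aligned, hence moving openConns to the top. A minimal self-contained sketch of the rule (field names are illustrative):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

type connCounter struct {
	// Kept first so the field is 64-bit aligned on 32-bit platforms,
	// as required by sync/atomic (see https://golang.org/pkg/sync/atomic/).
	openConns int64
	name      string
}

func main() {
	c := &connCounter{name: "backend-test"}
	atomic.AddInt64(&c.openConns, 1)
	fmt.Println(atomic.LoadInt64(&c.openConns)) // 1
}
```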
func (m *metricsMiddleware) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {


@ -48,7 +48,7 @@ func (c *Config) Setup(componentName string) (opentracing.Tracer, io.Closer, err
log.Warnf("Could not initialize jaeger tracer: %s", err.Error())
return nil, nil, err
}
log.Debugf("jaeger tracer configured", err)
log.Debug("Jaeger tracer configured")
return opentracing.GlobalTracer(), closer, nil
}


@ -3,6 +3,7 @@ package zipkin
import (
"io"
"github.com/containous/traefik/log"
opentracing "github.com/opentracing/opentracing-go"
zipkin "github.com/openzipkin/zipkin-go-opentracing"
)
@ -39,5 +40,7 @@ func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error
// Without this, child spans are getting the NOOP tracer
opentracing.SetGlobalTracer(tracer)
log.Debug("Zipkin tracer configured")
return tracer, collector, nil
}


@ -52,6 +52,7 @@ func (s *LocalStore) get() (*StoredData, error) {
return nil, err
}
}
// Check if ACME Account is in ACME V1 format
if s.storedData.Account != nil && s.storedData.Account.Registration != nil {
isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI)
@ -63,6 +64,21 @@ func (s *LocalStore) get() (*StoredData, error) {
s.SaveDataChan <- s.storedData
}
}
// Delete all certificates with no value
var certificates []*Certificate
for _, certificate := range s.storedData.Certificates {
if len(certificate.Certificate) == 0 || len(certificate.Key) == 0 {
log.Debugf("Delete certificate %v for domains %v which have no value.", certificate, certificate.Domain.ToStrArray())
continue
}
certificates = append(certificates, certificate)
}
if len(certificates) < len(s.storedData.Certificates) {
s.storedData.Certificates = certificates
s.SaveDataChan <- s.storedData
}
}
}


@ -41,7 +41,7 @@ type Configuration struct {
Storage string `description:"Storage to use."`
EntryPoint string `description:"EntryPoint to use."`
OnHostRule bool `description:"Enable certificate generation on frontends Host rules."`
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` //deprecated
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated
DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"`
HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"`
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"`
@ -225,11 +225,17 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
}
bundle := true
certificate, failures := client.ObtainCertificate(uncheckedDomains, bundle, nil, OSCPMustStaple)
if len(failures) > 0 {
return nil, fmt.Errorf("cannot obtain certificates %+v", failures)
}
log.Debugf("Certificates obtained for domain %+v", uncheckedDomains)
if len(certificate.Certificate) == 0 || len(certificate.PrivateKey) == 0 {
return nil, fmt.Errorf("domains %v generate certificate with no value: %v", uncheckedDomains, certificate)
}
log.Debugf("Certificates obtained for domains %+v", uncheckedDomains)
if len(uncheckedDomains) > 1 {
domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]}
} else {
@ -446,16 +452,25 @@ func (p *Provider) renewCertificates() {
log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err)
continue
}
log.Infof("Renewing certificate from LE : %+v", certificate.Domain)
renewedCert, err := client.RenewCertificate(acme.CertificateResource{
Domain: certificate.Domain.Main,
PrivateKey: certificate.Key,
Certificate: certificate.Certificate,
}, true, OSCPMustStaple)
if err != nil {
log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err)
continue
}
if len(renewedCert.Certificate) == 0 || len(renewedCert.PrivateKey) == 0 {
log.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate)
continue
}
p.addCertificateForDomain(certificate.Domain, renewedCert.Certificate, renewedCert.PrivateKey)
}
}
@ -473,6 +488,7 @@ func (p *Provider) AddRoutes(router *mux.Router) {
log.Debugf("Unable to split host and port: %v. Fallback to request host.", err)
domain = req.Host
}
tokenValue := getTokenValue(token, domain, p.Store)
if len(tokenValue) > 0 {
rw.WriteHeader(http.StatusOK)


@ -67,12 +67,13 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
container.SegmentLabels = labels
container.SegmentName = segmentName
// Frontends
if _, exists := serviceNames[container.ServiceName+segmentName]; !exists {
serviceNamesKey := getServiceNameKey(container, p.SwarmMode, segmentName)
if _, exists := serviceNames[serviceNamesKey]; !exists {
frontendName := p.getFrontendName(container, idx)
frontends[frontendName] = append(frontends[frontendName], container)
if len(container.ServiceName+segmentName) > 0 {
serviceNames[container.ServiceName+segmentName] = struct{}{}
if len(serviceNamesKey) > 0 {
serviceNames[serviceNamesKey] = struct{}{}
}
}
@ -104,6 +105,16 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
return configuration
}
func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string {
serviceNameKey := container.ServiceName
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); !swarmMode && err == nil {
serviceNameKey = values[labelDockerComposeService] + values[labelDockerComposeProject]
}
return serviceNameKey + segmentName
}
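Containers created by `docker-compose up --scale` share the same compose project and service labels, so keying the seen-services set on those labels (rather than on the per-container service name) collapses the scaled replicas into a single frontend. A standalone sketch of the keying idea, assuming the standard compose label names for `labelDockerComposeProject` and `labelDockerComposeService`:

```go
package main

import "fmt"

// Assumed values of labelDockerComposeProject / labelDockerComposeService:
// the labels docker-compose sets on every container it creates.
const (
	labelProject = "com.docker.compose.project"
	labelService = "com.docker.compose.service"
)

func serviceNameKey(labels map[string]string, serviceName, segmentName string) string {
	project, okP := labels[labelProject]
	service, okS := labels[labelService]
	if okP && okS {
		return service + project + segmentName
	}
	return serviceName + segmentName
}

func main() {
	labels := map[string]string{labelProject: "myProject", labelService: "myService"}
	// test_0 and test_1 are scaled replicas: both map to the same key.
	fmt.Println(serviceNameKey(labels, "test_0", "")) // myServicemyProject
	fmt.Println(serviceNameKey(labels, "test_1", "")) // myServicemyProject
}
```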
func (p *Provider) containerFilter(container dockerData) bool {
if !label.IsEnabled(container.Labels, p.ExposedByDefault) {
log.Debugf("Filtering disabled container %s", container.Name)


@ -311,6 +311,95 @@ func TestDockerBuildConfiguration(t *testing.T) {
},
},
},
{
desc: "when docker compose scale with different compose service names",
containers: []docker.ContainerJSON{
containerJSON(
name("test_0"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.1")),
),
containerJSON(
name("test_1"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.2")),
),
containerJSON(
name("test_2"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService2",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.3")),
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-Host-myService-myProject-docker-localhost-0": {
Backend: "backend-myService-myProject",
PassHostHeader: true,
EntryPoints: []string{},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-Host-myService-myProject-docker-localhost-0": {
Rule: "Host:myService.myProject.docker.localhost",
},
},
},
"frontend-Host-myService2-myProject-docker-localhost-2": {
Backend: "backend-myService2-myProject",
PassHostHeader: true,
EntryPoints: []string{},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-Host-myService2-myProject-docker-localhost-2": {
Rule: "Host:myService2.myProject.docker.localhost",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-myService-myProject": {
Servers: map[string]types.Server{
"server-test-0": {
URL: "http://127.0.0.1:80",
Weight: label.DefaultWeight,
}, "server-test-1": {
URL: "http://127.0.0.2:80",
Weight: label.DefaultWeight,
},
},
CircuitBreaker: nil,
},
"backend-myService2-myProject": {
Servers: map[string]types.Server{
"server-test-2": {
URL: "http://127.0.0.3:80",
Weight: label.DefaultWeight,
},
},
CircuitBreaker: nil,
},
},
},
}
for _, test := range testCases {


@ -125,11 +125,14 @@ func (p *Provider) buildConfigurationV1(containersInspected []dockerData) *types
servers := map[string][]dockerData{}
serviceNames := make(map[string]struct{})
for idx, container := range filteredContainers {
if _, exists := serviceNames[container.ServiceName]; !exists {
serviceNamesKey := getServiceNameKey(container, p.SwarmMode, "")
if _, exists := serviceNames[serviceNamesKey]; !exists {
frontendName := p.getFrontendNameV1(container, idx)
frontends[frontendName] = append(frontends[frontendName], container)
if len(container.ServiceName) > 0 {
serviceNames[container.ServiceName] = struct{}{}
if len(serviceNamesKey) > 0 {
serviceNames[serviceNamesKey] = struct{}{}
}
}
backendName := getBackendNameV1(container)


@ -1096,7 +1096,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
errorPageName, frontendName, errorPage.Backend)
} else if config.Backends[errorPage.Backend] == nil {
log.Errorf("Error when creating error page %q for frontend %q: the backend %q doesn't exist.",
errorPageName, errorPage.Backend)
errorPageName, frontendName, errorPage.Backend)
} else {
errorPagesHandler, err := errorpages.NewHandler(errorPage, entryPointName+providerName+errorPage.Backend)
if err != nil {
@ -1174,6 +1174,16 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
}
}
if headerMiddleware != nil {
log.Debugf("Adding header middleware for frontend %s", frontendName)
n.Use(s.tracingMiddleware.NewNegroniHandlerWrapper("Header", headerMiddleware, false))
}
if secureMiddleware != nil {
log.Debugf("Adding secure middleware for frontend %s", frontendName)
n.UseFunc(secureMiddleware.HandlerFuncWithNextForRequestOnly)
}
if len(frontend.BasicAuth) > 0 {
users := types.Users{}
for _, user := range frontend.BasicAuth {
@ -1192,16 +1202,6 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
}
}
if headerMiddleware != nil {
log.Debugf("Adding header middleware for frontend %s", frontendName)
n.Use(s.tracingMiddleware.NewNegroniHandlerWrapper("Header", headerMiddleware, false))
}
if secureMiddleware != nil {
log.Debugf("Adding secure middleware for frontend %s", frontendName)
n.UseFunc(secureMiddleware.HandlerFuncWithNextForRequestOnly)
}
if config.Backends[frontend.Backend].Buffering != nil {
bufferedLb, err := s.buildBufferingMiddleware(lb, config.Backends[frontend.Backend].Buffering)
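The reordering above moves the header and secure middlewares ahead of basic auth, so that an HTTPS redirect fires before credentials are demanded. A minimal sketch of why the ordering matters, using plain net/http handlers as stand-ins for the negroni chain:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// Simplified stand-ins for the secure and basic-auth middlewares: the
// ordering decides whether a plain-HTTP request is redirected before
// credentials are demanded.
func redirectToHTTPS(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.TLS == nil {
			http.Redirect(w, r, "https://"+r.Host+r.URL.Path, http.StatusFound)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func basicAuth(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if _, _, ok := r.BasicAuth(); !ok {
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	final := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
	// The redirect wraps auth and therefore runs first: a plain-HTTP request
	// gets a 302 to HTTPS instead of a 401 challenge.
	chain := redirectToHTTPS(basicAuth(final))

	rec := httptest.NewRecorder()
	chain.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "http://example.com/", nil))
	fmt.Println(rec.Code) // 302
}
```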


@ -32,7 +32,12 @@ func (g *CollectingGauge) With(labelValues ...string) metrics.Gauge {
}
// Set is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Set(delta float64) {
func (g *CollectingGauge) Set(value float64) {
g.GaugeValue = value
}
// Add is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Add(delta float64) {
g.GaugeValue = delta
}
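The rename above restores the go-kit `metrics.Gauge` semantics: `Set` overwrites the gauge value while `Add` accumulates a delta. A small usage sketch with go-kit's in-memory generic gauge (assuming the v0.7.0 API this commit vendors):

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/metrics/generic"
)

func main() {
	g := generic.NewGauge("open_connections")
	g.Set(3)               // Set overwrites the current value
	g.Add(2)               // Add accumulates a delta on top of it
	fmt.Println(g.Value()) // 5
}
```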


@ -1,7 +1,10 @@
package servicefabric
import (
"encoding/json"
"errors"
"net/http"
"strings"
"time"
"github.com/cenk/backoff"
@ -18,6 +21,11 @@ var _ provider.Provider = (*Provider)(nil)
const traefikServiceFabricExtensionKey = "Traefik"
const (
kindStateful = "Stateful"
kindStateless = "Stateless"
)
// Provider holds configuration for the provider
type Provider struct {
provider.BaseProvider `mapstructure:",squash"`
@ -66,7 +74,7 @@ func (p *Provider) updateConfig(configurationChan chan<- types.ConfigMessage, po
log.Info("Checking service fabric config")
}
configuration, err := p.buildConfiguration(sfClient)
configuration, err := p.getConfiguration(sfClient)
if err != nil {
return err
}
@ -90,6 +98,15 @@ func (p *Provider) updateConfig(configurationChan chan<- types.ConfigMessage, po
return nil
}
func (p *Provider) getConfiguration(sfClient sfClient) (*types.Configuration, error) {
services, err := getClusterServices(sfClient)
if err != nil {
return nil, err
}
return p.buildConfiguration(services)
}
func getClusterServices(sfClient sfClient) ([]ServiceItemExtended, error) {
apps, err := sfClient.GetApplications()
if err != nil {
@ -171,11 +188,6 @@ func getValidInstances(sfClient sfClient, app sf.ApplicationItem, service sf.Ser
return validInstances
}
func isPrimary(instance replicaInstance) bool {
_, data := instance.GetReplicaData()
return data.ReplicaRole == "Primary"
}
func isHealthy(instanceData *sf.ReplicaItemBase) bool {
return instanceData != nil && (instanceData.ReplicaStatus == "Ready" && instanceData.HealthState != "Error")
}
@ -185,6 +197,54 @@ func hasHTTPEndpoint(instanceData *sf.ReplicaItemBase) bool {
return err == nil
}
func getReplicaDefaultEndpoint(replicaData *sf.ReplicaItemBase) (string, error) {
endpoints, err := decodeEndpointData(replicaData.Address)
if err != nil {
return "", err
}
var defaultHTTPEndpoint string
for _, v := range endpoints {
if strings.Contains(v, "http") {
defaultHTTPEndpoint = v
break
}
}
if len(defaultHTTPEndpoint) == 0 {
return "", errors.New("no default endpoint found")
}
return defaultHTTPEndpoint, nil
}
func decodeEndpointData(endpointData string) (map[string]string, error) {
var endpointsMap map[string]map[string]string
if endpointData == "" {
return nil, errors.New("endpoint data is empty")
}
err := json.Unmarshal([]byte(endpointData), &endpointsMap)
if err != nil {
return nil, err
}
endpoints, endpointsExist := endpointsMap["Endpoints"]
if !endpointsExist {
return nil, errors.New("endpoint doesn't exist in endpoint data")
}
return endpoints, nil
}
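decodeEndpointData unwraps the JSON document that Service Fabric stores in a replica's address field. A self-contained usage sketch, with an illustrative payload (the endpoint name and URL are sample data):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Mirror of the decodeEndpointData helper above.
func decodeEndpointData(endpointData string) (map[string]string, error) {
	var endpointsMap map[string]map[string]string
	if endpointData == "" {
		return nil, errors.New("endpoint data is empty")
	}
	if err := json.Unmarshal([]byte(endpointData), &endpointsMap); err != nil {
		return nil, err
	}
	endpoints, ok := endpointsMap["Endpoints"]
	if !ok {
		return nil, errors.New("endpoint doesn't exist in endpoint data")
	}
	return endpoints, nil
}

func main() {
	data := `{"Endpoints":{"web":"http://10.0.0.1:8080"}}` // sample payload
	endpoints, err := decodeEndpointData(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(endpoints["web"]) // http://10.0.0.1:8080
}
```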
func isStateful(service ServiceItemExtended) bool {
return service.ServiceKind == kindStateful
}
func isStateless(service ServiceItemExtended) bool {
return service.ServiceKind == kindStateless
}
// Return a set of labels from the Extension and Property manager
// Allow Extension labels to disable importing labels from the property manager.
func getLabels(sfClient sfClient, service *sf.ServiceItem, app *sf.ApplicationItem) (map[string]string, error) {


@ -1,9 +1,7 @@
package servicefabric
import (
"encoding/json"
"errors"
"math"
"strings"
"text/template"
@ -14,12 +12,7 @@ import (
sf "github.com/jjcollinge/servicefabric"
)
func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration, error) {
services, err := getClusterServices(sfClient)
if err != nil {
return nil, err
}
func (p *Provider) buildConfiguration(services []ServiceItemExtended) (*types.Configuration, error) {
var sfFuncMap = template.FuncMap{
// Services
"getServices": getServices,
@ -38,7 +31,7 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
"filterServicesByLabelValue": filterServicesByLabelValue, // FIXME unused
// Backend functions
"getWeight": getFuncServiceIntLabel(label.TraefikWeight, label.DefaultWeightInt),
"getWeight": getFuncServiceIntLabel(label.TraefikWeight, label.DefaultWeight),
"getProtocol": getFuncServiceStringLabel(label.TraefikProtocol, label.DefaultProtocol),
"getMaxConn": getMaxConn,
"getHealthCheck": getHealthCheck,
@ -46,9 +39,9 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
"getLoadBalancer": getLoadBalancer,
// Frontend Functions
"getPriority": getFuncServiceIntLabel(label.TraefikFrontendPriority, label.DefaultFrontendPriorityInt),
"getPassHostHeader": getFuncServiceBoolLabel(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeaderBool),
"getPassTLSCert": getFuncBoolLabel(label.TraefikFrontendPassTLSCert, false),
"getPriority": getFuncServiceIntLabel(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
"getPassHostHeader": getFuncServiceBoolLabel(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
"getPassTLSCert": getFuncBoolLabel(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
"getEntryPoints": getFuncServiceSliceStringLabel(label.TraefikFrontendEntryPoints),
"getBasicAuth": getFuncServiceSliceStringLabel(label.TraefikFrontendAuthBasic),
"getFrontendRules": getFuncServiceLabelWithPrefix(label.TraefikFrontendRule),
@ -70,12 +63,9 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
return p.GetConfiguration(tmpl, sfFuncMap, templateObjects)
}
func isStateful(service ServiceItemExtended) bool {
return service.ServiceKind == "Stateful"
}
func isStateless(service ServiceItemExtended) bool {
return service.ServiceKind == "Stateless"
func isPrimary(instance replicaInstance) bool {
_, data := instance.GetReplicaData()
return data.ReplicaRole == "Primary"
}
func getBackendName(service ServiceItemExtended, partition PartitionItemExtended) string {
@ -92,26 +82,6 @@ func getDefaultEndpoint(instance replicaInstance) string {
return endpoint
}
func getReplicaDefaultEndpoint(replicaData *sf.ReplicaItemBase) (string, error) {
endpoints, err := decodeEndpointData(replicaData.Address)
if err != nil {
return "", err
}
var defaultHTTPEndpoint string
for _, v := range endpoints {
if strings.Contains(v, "http") {
defaultHTTPEndpoint = v
break
}
}
if len(defaultHTTPEndpoint) == 0 {
return "", errors.New("no default endpoint found")
}
return defaultHTTPEndpoint, nil
}
func getNamedEndpoint(instance replicaInstance, endpointName string) string {
id, data := instance.GetReplicaData()
endpoint, err := getReplicaNamedEndpoint(data, endpointName)
@ -175,170 +145,30 @@ func filterServicesByLabelValue(services []ServiceItemExtended, key, expectedVal
return srvWithLabel
}
func decodeEndpointData(endpointData string) (map[string]string, error) {
var endpointsMap map[string]map[string]string
if endpointData == "" {
return nil, errors.New("endpoint data is empty")
}
err := json.Unmarshal([]byte(endpointData), &endpointsMap)
if err != nil {
return nil, err
}
endpoints, endpointsExist := endpointsMap["Endpoints"]
if !endpointsExist {
return nil, errors.New("endpoint doesn't exist in endpoint data")
}
return endpoints, nil
}
func getHeaders(service ServiceItemExtended) *types.Headers {
headers := &types.Headers{
CustomRequestHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendRequestHeaders),
CustomResponseHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendResponseHeaders),
SSLProxyHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendSSLProxyHeaders),
AllowedHosts: label.GetSliceStringValue(service.Labels, label.TraefikFrontendAllowedHosts),
HostsProxyHeaders: label.GetSliceStringValue(service.Labels, label.TraefikFrontendHostsProxyHeaders),
STSSeconds: label.GetInt64Value(service.Labels, label.TraefikFrontendSTSSeconds, 0),
SSLRedirect: label.GetBoolValue(service.Labels, label.TraefikFrontendSSLRedirect, false),
SSLTemporaryRedirect: label.GetBoolValue(service.Labels, label.TraefikFrontendSSLTemporaryRedirect, false),
STSIncludeSubdomains: label.GetBoolValue(service.Labels, label.TraefikFrontendSTSIncludeSubdomains, false),
STSPreload: label.GetBoolValue(service.Labels, label.TraefikFrontendSTSPreload, false),
ForceSTSHeader: label.GetBoolValue(service.Labels, label.TraefikFrontendForceSTSHeader, false),
FrameDeny: label.GetBoolValue(service.Labels, label.TraefikFrontendFrameDeny, false),
ContentTypeNosniff: label.GetBoolValue(service.Labels, label.TraefikFrontendContentTypeNosniff, false),
BrowserXSSFilter: label.GetBoolValue(service.Labels, label.TraefikFrontendBrowserXSSFilter, false),
IsDevelopment: label.GetBoolValue(service.Labels, label.TraefikFrontendIsDevelopment, false),
SSLHost: label.GetStringValue(service.Labels, label.TraefikFrontendSSLHost, ""),
CustomFrameOptionsValue: label.GetStringValue(service.Labels, label.TraefikFrontendCustomFrameOptionsValue, ""),
ContentSecurityPolicy: label.GetStringValue(service.Labels, label.TraefikFrontendContentSecurityPolicy, ""),
PublicKey: label.GetStringValue(service.Labels, label.TraefikFrontendPublicKey, ""),
ReferrerPolicy: label.GetStringValue(service.Labels, label.TraefikFrontendReferrerPolicy, ""),
CustomBrowserXSSValue: label.GetStringValue(service.Labels, label.TraefikFrontendCustomBrowserXSSValue, ""),
}
if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() {
return nil
}
return headers
return label.GetHeaders(service.Labels)
}
func getWhiteList(service ServiceItemExtended) *types.WhiteList {
if label.Has(service.Labels, label.TraefikFrontendWhitelistSourceRange) {
log.Warnf("Deprecated configuration found: %s. Please use %s.", label.TraefikFrontendWhitelistSourceRange, label.TraefikFrontendWhiteListSourceRange)
}
ranges := label.GetSliceStringValue(service.Labels, label.TraefikFrontendWhiteListSourceRange)
if len(ranges) > 0 {
return &types.WhiteList{
SourceRange: ranges,
UseXForwardedFor: label.GetBoolValue(service.Labels, label.TraefikFrontendWhiteListUseXForwardedFor, false),
}
}
// TODO: Deprecated
values := label.GetSliceStringValue(service.Labels, label.TraefikFrontendWhitelistSourceRange)
if len(values) > 0 {
return &types.WhiteList{
SourceRange: values,
UseXForwardedFor: false,
}
}
return nil
return label.GetWhiteList(service.Labels)
}
func getRedirect(service ServiceItemExtended) *types.Redirect {
permanent := label.GetBoolValue(service.Labels, label.TraefikFrontendRedirectPermanent, false)
if label.Has(service.Labels, label.TraefikFrontendRedirectEntryPoint) {
return &types.Redirect{
EntryPoint: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectEntryPoint, ""),
Permanent: permanent,
}
}
if label.Has(service.Labels, label.TraefikFrontendRedirectRegex) &&
label.Has(service.Labels, label.TraefikFrontendRedirectReplacement) {
return &types.Redirect{
Regex: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectRegex, ""),
Replacement: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectReplacement, ""),
Permanent: permanent,
}
}
return nil
return label.GetRedirect(service.Labels)
}
func getMaxConn(service ServiceItemExtended) *types.MaxConn {
amount := label.GetInt64Value(service.Labels, label.TraefikBackendMaxConnAmount, math.MinInt64)
extractorFunc := label.GetStringValue(service.Labels, label.TraefikBackendMaxConnExtractorFunc, label.DefaultBackendMaxconnExtractorFunc)
if amount == math.MinInt64 || len(extractorFunc) == 0 {
return nil
}
return &types.MaxConn{
Amount: amount,
ExtractorFunc: extractorFunc,
}
return label.GetMaxConn(service.Labels)
}
func getHealthCheck(service ServiceItemExtended) *types.HealthCheck {
path := label.GetStringValue(service.Labels, label.TraefikBackendHealthCheckPath, "")
if len(path) == 0 {
return nil
}
port := label.GetIntValue(service.Labels, label.TraefikBackendHealthCheckPort, label.DefaultBackendHealthCheckPort)
interval := label.GetStringValue(service.Labels, label.TraefikBackendHealthCheckInterval, "")
return &types.HealthCheck{
Path: path,
Port: port,
Interval: interval,
}
return label.GetHealthCheck(service.Labels)
}
func getCircuitBreaker(service ServiceItemExtended) *types.CircuitBreaker {
circuitBreaker := label.GetStringValue(service.Labels, label.TraefikBackendCircuitBreakerExpression, "")
if len(circuitBreaker) == 0 {
return nil
}
return &types.CircuitBreaker{Expression: circuitBreaker}
return label.GetCircuitBreaker(service.Labels)
}
func getLoadBalancer(service ServiceItemExtended) *types.LoadBalancer {
if !label.HasPrefix(service.Labels, label.TraefikBackendLoadBalancer) {
return nil
}
method := label.GetStringValue(service.Labels, label.TraefikBackendLoadBalancerMethod, label.DefaultBackendLoadBalancerMethod)
lb := &types.LoadBalancer{
Method: method,
Sticky: getSticky(service),
}
if label.GetBoolValue(service.Labels, label.TraefikBackendLoadBalancerStickiness, false) {
cookieName := label.GetStringValue(service.Labels, label.TraefikBackendLoadBalancerStickinessCookieName, label.DefaultBackendLoadbalancerStickinessCookieName)
lb.Stickiness = &types.Stickiness{CookieName: cookieName}
}
return lb
}
// TODO: Deprecated
// replaced by Stickiness
// Deprecated
func getSticky(service ServiceItemExtended) bool {
if label.Has(service.Labels, label.TraefikBackendLoadBalancerSticky) {
log.Warnf("Deprecated configuration found: %s. Please use %s.", label.TraefikBackendLoadBalancerSticky, label.TraefikBackendLoadBalancerStickiness)
}
return label.GetBoolValue(service.Labels, label.TraefikBackendLoadBalancerSticky, false)
return label.GetLoadBalancer(service.Labels)
}


@ -59,7 +59,7 @@ const tmpl = `
{{range $instance := $partition.Instances}}
[backends."{{ $service.Name }}".servers."{{ $instance.ID }}"]
url = "{{ getDefaultEndpoint $instance }}"
weight = {{ getLabelValue $service "backend.weight" "1" }}
weight = {{ getWeight $service }}
{{end}}
{{else if isStateful $service}}
@ -199,17 +199,18 @@ const tmpl = `
rule = "{{ $value }}"
{{end}}
{{else if isStateful $service}}
{{else if isStateful $service }}
{{range $partition := $service.Partitions }}
{{ $partitionId := $partition.PartitionInformation.ID }}
{{if hasLabel $service "frontend.rule" }}
[frontends."{{ $service.Name }}/{{ $partitionId }}"]
backend = "{{ getBackendName $service.Name $partition }}"
{{ $rule := getLabelValue $service (print "traefik.frontend.rule.partition." $partitionId) "" }}
{{if $rule }}
[frontends."{{ $service.Name }}/{{ $partitionId }}"]
backend = "{{ getBackendName $service $partition }}"
[frontends."{{ $service.Name }}/{{ $partitionId }}".routes.default]
rule = {{ getLabelValue $service "frontend.rule.partition.$partitionId" "" }}
rule = "{{ $rule }}"
{{end}}
{{end}}


@ -35,14 +35,15 @@
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Log Context
// Contextual Loggers
//
// A log context stores keyvals that it includes in all log events. Building
// appropriate log contexts reduces repetition and aids consistency in the
// resulting log output. We can use a context to improve the RunTask example.
// A contextual logger stores keyvals that it includes in all log events.
// Building appropriate contextual loggers reduces repetition and aids
// consistency in the resulting log output. With and WithPrefix add context to
// a logger. We can use With to improve the RunTask example.
//
// func RunTask(task Task, logger log.Logger) string {
// logger = log.NewContext(logger).With("taskID", task.ID)
// logger = log.With(logger, "taskID", task.ID)
// logger.Log("event", "starting task")
// ...
// taskHelper(task.Cmd, logger)
@ -51,19 +52,18 @@
// }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. The call to taskHelper highlights that a
// context may be passed as a logger to other functions. Each log event
// created by the called function will include the task.ID even though the
// function does not have access to that value. Using log contexts this way
// simplifies producing log output that enables tracing the life cycle of
// individual tasks. (See the Context example for the full code of the
// above snippet.)
// first and last calls to Log. Passing the contextual logger to taskHelper
// enables each log event created by taskHelper to include the task.ID even
// though taskHelper does not have access to that value. Using contextual
// loggers this way simplifies producing log output that enables tracing the
// life cycle of individual tasks. (See the Contextual example for the full
// code of the above snippet.)
//
// Dynamic Context Values
// Dynamic Contextual Values
//
// A Valuer function stored in a log context generates a new value each time
// the context logs an event. The Valuer example demonstrates how this
// feature works.
// A Valuer function stored in a contextual logger generates a new value each
// time an event is logged. The Valuer example demonstrates how this feature
// works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
@ -72,7 +72,7 @@
// entries contain a timestamp and source location looks like this:
//
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
@ -90,4 +90,27 @@
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
// if err := fmtlogger.Log(keyvals...); err != nil {
// panic(err)
// }
// return nil
// })
package log
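The rewritten godoc above reflects the go-kit v0.7.0 API this commit vendors: `log.NewContext(logger).With(...)` is replaced by the package-level `log.With`. A short sketch of the new style (the task ID is illustrative):

```go
package main

import (
	"os"

	"github.com/go-kit/kit/log"
)

func main() {
	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
	// With returns a contextual logger; ts and caller are bound per log event.
	logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
	logger.Log("event", "starting task", "taskID", 42)
}
```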


@ -44,9 +44,6 @@ func merge(dst map[string]interface{}, k, v interface{}) {
default:
key = fmt.Sprint(x)
}
if x, ok := v.(error); ok {
v = safeError(x)
}
// We want json.Marshaler and encoding.TextMarshaller to take priority over
// err.Error() and v.String(). But json.Marshall (called later) does that by


@ -6,7 +6,7 @@ import "errors"
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies any of its elements must make a copy first.
// modifies or retains any of its elements must make a copy first.
type Logger interface {
Log(keyvals ...interface{}) error
}
@ -15,62 +15,100 @@ type Logger interface {
// the missing value.
var ErrMissingValue = errors.New("(MISSING)")
// NewContext returns a new Context that logs to logger.
func NewContext(logger Logger) *Context {
if c, ok := logger.(*Context); ok {
return c
// With returns a new contextual logger with keyvals prepended to those passed
// to calls to Log. If logger is also a contextual logger created by With or
// WithPrefix, keyvals is appended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
return &Context{logger: logger}
}
// Context must always have the same number of stack frames between calls to
// WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With or WithPrefix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// context is the Logger implementation returned by With and WithPrefix. It
// wraps a Logger and holds keyvals that it includes in all log events. Its
// Log method calls bindValues to generate values for each Valuer in the
// context keyvals.
//
// A context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to
// resolve application call site information for a log.Caller stored in the
// resolve application call site information for a Caller stored in the
// context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called.
//
// Three implementation details provide the needed stack depth consistency.
// The first two of these details also result in better amortized performance,
// and thus make sense even without the requirements regarding stack depth.
// The third detail, however, is subtle and tied to the implementation of the
// Go compiler.
// Two implementation details provide the needed stack depth consistency.
//
// 1. NewContext avoids introducing an additional layer when asked to
// wrap another Context.
// 2. With avoids introducing an additional layer by returning a newly
// constructed Context with a merged keyvals rather than simply
// wrapping the existing Context.
// 3. All of Context's methods take pointer receivers even though they
// do not mutate the Context.
//
// Before explaining the last detail, first some background. The Go compiler
// generates wrapper methods to implement the auto dereferencing behavior when
// calling a value method through a pointer variable. These wrapper methods
// are also used when calling a value method through an interface variable
// because interfaces store a pointer to the underlying concrete value.
// Calling a pointer receiver through an interface does not require generating
// an additional function.
//
// If Context had value methods then calling Context.Log through a variable
// with type Logger would have an extra stack frame compared to calling
// Context.Log through a variable with type Context. Using pointer receivers
// avoids this problem.
// A Context wraps a Logger and holds keyvals that it includes in all log
// events. When logging, a Context replaces all value elements (odd indexes)
// containing a Valuer with their generated value for each call to its Log
// method.
type Context struct {
// 1. newContext avoids introducing an additional layer when asked to
// wrap another context.
// 2. With and WithPrefix avoid introducing an additional layer by
// returning a newly constructed context with a merged keyvals rather
// than simply wrapping the existing context.
type context struct {
logger Logger
keyvals []interface{}
hasValuer bool
}
func newContext(logger Logger) *context {
if c, ok := logger.(*context); ok {
return c
}
return &context{logger: logger}
}
// Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger.
func (l *Context) Log(keyvals ...interface{}) error {
func (l *context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
@ -86,53 +124,6 @@ func (l *Context) Log(keyvals ...interface{}) error {
return l.logger.Log(kvs...)
}
// With returns a new Context with keyvals appended to those of the receiver.
func (l *Context) With(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &Context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new Context with keyvals prepended to those of the
// receiver.
func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &Context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.

View file

@ -39,7 +39,7 @@ func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key }
}
// FileKey sets the key for the file and line field. By default, it's "file".
// FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key }
}
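A usage sketch for the adapter (assuming the standard library log package imported as stdlog): with the new default, the file and line parsed out of stdlib output are emitted under the "caller" key.

    logger := log.NewLogfmtLogger(os.Stderr)
    stdlog.SetOutput(log.NewStdlibAdapter(logger))
    stdlog.SetFlags(stdlog.LstdFlags | stdlog.Lshortfile)
    stdlog.Println("hello") // ts=... caller=main.go:15 msg=hello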
@ -55,7 +55,7 @@ func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{
Logger: logger,
timestampKey: "ts",
fileKey: "file",
fileKey: "caller",
messageKey: "msg",
}
for _, option := range options {

View file

@ -36,24 +36,59 @@ func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger})
}
// SyncWriter synchronizes concurrent writes to an io.Writer.
type SyncWriter struct {
mu sync.Mutex
w io.Writer
// NewSyncWriter returns a new writer that is safe for concurrent use by
// multiple goroutines. Writes to the returned writer are passed on to w. If
// another write is already in progress, the calling goroutine blocks until
// the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
// interface {
// Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
switch w := w.(type) {
case fdWriter:
return &fdSyncWriter{fdWriter: w}
default:
return &syncWriter{Writer: w}
}
}
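A sketch of typical use: wrap a shared destination once, then hand the synchronized writer to any goroutines (or loggers) that write to it.

    w := log.NewSyncWriter(os.Stderr)
    logger := log.NewLogfmtLogger(w)
    // logger may now be used from multiple goroutines; each Log call
    // produces a single serialized write to os.Stderr.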
// NewSyncWriter returns a new SyncWriter. The returned writer is safe for
// concurrent use by multiple goroutines.
func NewSyncWriter(w io.Writer) *SyncWriter {
return &SyncWriter{w: w}
// syncWriter synchronizes concurrent writes to an io.Writer.
type syncWriter struct {
sync.Mutex
io.Writer
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the SyncWriter is available.
func (w *SyncWriter) Write(p []byte) (n int, err error) {
w.mu.Lock()
n, err = w.w.Write(p)
w.mu.Unlock()
// progress, the calling goroutine blocks until the syncWriter is available.
func (w *syncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.Writer.Write(p)
w.Unlock()
return n, err
}
// fdWriter is an io.Writer that also has an Fd method. The most common
// example of an fdWriter is an *os.File.
type fdWriter interface {
io.Writer
Fd() uintptr
}
// fdSyncWriter synchronizes concurrent writes to an fdWriter.
type fdSyncWriter struct {
sync.Mutex
fdWriter
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the fdSyncWriter is available.
func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.fdWriter.Write(p)
w.Unlock()
return n, err
}

View file

@ -6,9 +6,9 @@ import (
"github.com/go-stack/stack"
)
// A Valuer generates a log value. When passed to Context.With in a value
// element (odd indexes), it represents a dynamic value which is re-evaluated
// with each log event.
// A Valuer generates a log value. When passed to With or WithPrefix in a
// value element (odd indexes), it represents a dynamic value which is
// re-evaluated with each log event.
type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer
@ -32,22 +32,51 @@ func containsValuer(keyvals []interface{}) bool {
return false
}
// Timestamp returns a Valuer that invokes the underlying function when bound,
// returning a time.Time. Users will probably want to use DefaultTimestamp or
// DefaultTimestampUTC.
// Timestamp returns a timestamp Valuer. It invokes the t function to get the
// time; unless you are doing something tricky, pass time.Now.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() }
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) }
// TimestampFormat returns a timestamp Valuer with a custom time format. It
// invokes the t function to get the time to format; unless you are doing
// something tricky, pass time.Now. The layout string is passed to
// Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
return func() interface{} {
return timeFormat{
time: t(),
layout: layout,
}
}
}
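As a sketch of a custom layout (the RFC822 choice here is arbitrary), the result is an ordinary Valuer and is used like any other:

    logger = log.With(logger,
        "ts", log.TimestampFormat(time.Now, time.RFC822), // custom layout
    )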
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) }
)
// A timeFormat represents an instant in time and a layout used when
// marshaling to a text format.
type timeFormat struct {
time time.Time
layout string
}
func (tf timeFormat) String() string {
return tf.time.Format(tf.layout)
}
// MarshalText implements encoding.TextMarshaler.
func (tf timeFormat) MarshalText() (text []byte, err error) {
// The following code is adapted from the standard library time.Time.Format
// method, using the same undocumented magic constant to extend the size
// of the buffer as seen there.
b := make([]byte, 0, len(tf.layout)+10)
b = tf.time.AppendFormat(b, tf.layout)
return b, nil
}
// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
@ -56,6 +85,17 @@ func Caller(depth int) Valuer {
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC = TimestampFormat(
func() time.Time { return time.Now().UTC() },
time.RFC3339Nano,
)
// DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3)

View file

@ -1,23 +1,54 @@
// Package metrics provides a framework for application instrumentation. All
// metrics are safe for concurrent use. Considerable design influence has been
// taken from https://github.com/codahale/metrics and https://prometheus.io.
// Package metrics provides a framework for application instrumentation. It's
// primarily designed to help you get started with good and robust
// instrumentation, and to help you migrate from a less-capable system like
// Graphite to a more-capable system like Prometheus. If your organization has
// already standardized on an instrumentation system like Prometheus, and has no
// plans to change, it may make sense to use that system's instrumentation
// library directly.
//
// This package contains the common interfaces. Your code should take these
// interfaces as parameters. Implementations are provided for different
// instrumentation systems in the various subdirectories.
// This package provides three core metric abstractions (Counter, Gauge, and
// Histogram) and implementations for almost all common instrumentation
// backends. Each metric has an observation method (Add, Set, or Observe,
// respectively) used to record values, and a With method to "scope" the
// observation by various parameters. For example, you might have a Histogram to
// record request durations, parameterized by the method that's being called.
//
// var requestDuration metrics.Histogram
// // ...
// requestDuration.With("method", "MyMethod").Observe(time.Since(begin))
//
// This allows a single high-level metrics object (requestDuration) to work with
// many code paths somewhat dynamically. The concept of With is fully supported
// in some backends like Prometheus, and not supported in other backends like
// Graphite. So, With may be a no-op, depending on the concrete implementation
// you choose. Please check the implementation to know for sure. For
// implementations that don't provide With, it's necessary to fully parameterize
// each metric in the metric name, e.g.
//
// // Statsd
// c := statsd.NewCounter("request_duration_MyMethod_200")
// c.Add(1)
//
// // Prometheus
// c := prometheus.NewCounter(stdprometheus.CounterOpts{
// Name: "request_duration",
// ...
// }, []string{"method", "status_code"})
// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1)
//
// Usage
//
// Metrics are dependencies and should be passed to the components that need
// Metrics are dependencies, and should be passed to the components that need
// them in the same way you'd construct and pass a database handle, or reference
// to another component. So, create metrics in your func main, using whichever
// concrete implementation is appropriate for your organization.
// to another component. Metrics should *not* be created in the global scope.
// Instead, instantiate metrics in your func main, using whichever concrete
// implementation is appropriate for your organization.
//
// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
// Namespace: "myteam",
// Subsystem: "foosvc",
// Name: "request_latency_seconds",
// Help: "Incoming request latency in seconds."
// Help: "Incoming request latency in seconds.",
// }, []string{"method", "status_code"})
//
// Write your components to take the metrics they will use as parameters to
@ -40,8 +71,14 @@
// api := NewAPI(store, logger, latency)
// http.ListenAndServe("/", api)
//
// Note that metrics are "write-only" interfaces.
//
// Implementation details
//
// All metrics are safe for concurrent use. Considerable design influence has
// been taken from https://github.com/codahale/metrics and
// https://prometheus.io.
//
// Each telemetry system has different semantics for label values, push vs.
// pull, support for histograms, etc. These properties influence the design of
// their respective packages. This table attempts to summarize the key points of
@ -54,7 +91,7 @@
// expvar 1 atomic atomic synthetic, batch, in-place expose
// influx n custom custom custom
// prometheus n native native native
// circonus 1 native native native
// pcp 1 native native native
// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate
//
package metrics
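To make the wiring above concrete, a hedged sketch of a component that takes its metrics as dependencies (names hypothetical; assumes imports of time and this package):

    type addService struct {
        requestCount   metrics.Counter
        requestLatency metrics.Histogram
    }

    func (s *addService) Add(a, b int) int {
        defer func(begin time.Time) {
            s.requestCount.With("method", "Add").Add(1)
            s.requestLatency.With("method", "Add").Observe(time.Since(begin).Seconds())
        }(time.Now())
        return a + b
    }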

View file

@ -14,10 +14,13 @@ import (
"fmt"
"io"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/generic"
"github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn"
@ -34,53 +37,63 @@ import (
// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a DogStatsD server, use the SendLoop helper method.
type Dogstatsd struct {
mtx sync.RWMutex
prefix string
rates *ratemap.RateMap
counters *lv.Space
gauges *lv.Space
gauges map[string]*gaugeNode
timings *lv.Space
histograms *lv.Space
logger log.Logger
lvs lv.LabelValues
}
// New returns a Dogstatsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Dogstatsd {
func New(prefix string, logger log.Logger, lvs ...string) *Dogstatsd {
if len(lvs)%2 != 0 {
panic("odd number of LabelValues; programmer error!")
}
return &Dogstatsd{
prefix: prefix,
rates: ratemap.New(),
counters: lv.NewSpace(),
gauges: lv.NewSpace(),
gauges: map[string]*gaugeNode{},
timings: lv.NewSpace(),
histograms: lv.NewSpace(),
logger: logger,
lvs: lvs,
}
}
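A usage sketch for the new variadic label values (address and names hypothetical; SendLoop is the helper mentioned above): pairs given at construction are attached as tags to every emitted metric.

    d := dogstatsd.New("myservice.", logger, "env", "prod", "region", "eu")
    go d.SendLoop(time.Tick(10*time.Second), "udp", "dogstatsd:8125")
    c := d.NewCounter("requests_total", 1.0)
    c.Add(1) // myservice.requests_total:1.000000|c|#env:prod,region:eu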
// NewCounter returns a counter, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
d.rates.Set(d.prefix+name, sampleRate)
d.rates.Set(name, sampleRate)
return &Counter{
name: d.prefix + name,
name: name,
obs: d.counters.Observe,
}
}
// NewGauge returns a gauge, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewGauge(name string) *Gauge {
return &Gauge{
name: d.prefix + name,
obs: d.gauges.Observe,
d.mtx.Lock()
n, ok := d.gauges[name]
if !ok {
n = &gaugeNode{gauge: &Gauge{g: generic.NewGauge(name), ddog: d}}
d.gauges[name] = n
}
d.mtx.Unlock()
return n.gauge
}
// NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
d.rates.Set(d.prefix+name, sampleRate)
d.rates.Set(name, sampleRate)
return &Timing{
name: d.prefix + name,
name: name,
obs: d.timings.Observe,
}
}
@ -88,9 +101,9 @@ func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
// NewHistogram returns a histogram whose observations are of an unspecified
// unit, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
d.rates.Set(d.prefix+name, sampleRate)
d.rates.Set(name, sampleRate)
return &Histogram{
name: d.prefix + name,
name: name,
obs: d.histograms.Observe,
}
}
@ -124,7 +137,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
var n int
d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs))
n, err = fmt.Fprintf(w, "%s%s:%f|c%s%s\n", d.prefix, name, sum(values), sampling(d.rates.Get(name)), d.tagValues(lvs))
if err != nil {
return false
}
@ -135,22 +148,23 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
return count, err
}
d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
d.mtx.RLock()
for _, root := range d.gauges {
root.walk(func(name string, lvs lv.LabelValues, value float64) bool {
n, err = fmt.Fprintf(w, "%s%s:%f|g%s\n", d.prefix, name, value, d.tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
}
d.mtx.RUnlock()
d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
n, err = fmt.Fprintf(w, "%s%s:%f|ms%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil {
return false
}
@ -165,7 +179,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
n, err = fmt.Fprintf(w, "%s%s:%f|h%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil {
return false
}
@ -200,14 +214,17 @@ func sampling(r float64) string {
return sv
}
func tagValues(labelValues []string) string {
if len(labelValues) == 0 {
func (d *Dogstatsd) tagValues(labelValues []string) string {
if len(labelValues) == 0 && len(d.lvs) == 0 {
return ""
}
if len(labelValues)%2 != 0 {
panic("tagValues received a labelValues with an odd number of strings")
}
pairs := make([]string, 0, len(labelValues)/2)
pairs := make([]string, 0, (len(d.lvs)+len(labelValues))/2)
for i := 0; i < len(d.lvs); i += 2 {
pairs = append(pairs, d.lvs[i]+":"+d.lvs[i+1])
}
for i := 0; i < len(labelValues); i += 2 {
pairs = append(pairs, labelValues[i]+":"+labelValues[i+1])
}
@ -241,23 +258,31 @@ func (c *Counter) Add(delta float64) {
// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd
// object, and aggregated (the last observation selected) per timeseries.
type Gauge struct {
name string
lvs lv.LabelValues
obs observeFunc
g *generic.Gauge
ddog *Dogstatsd
set int32
}
// With implements metrics.Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{
name: g.name,
lvs: g.lvs.With(labelValues...),
obs: g.obs,
}
g.ddog.mtx.RLock()
node := g.ddog.gauges[g.g.Name]
g.ddog.mtx.RUnlock()
ga := &Gauge{g: g.g.With(labelValues...).(*generic.Gauge), ddog: g.ddog}
return node.addGauge(ga, ga.g.LabelValues())
}
// Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value)
g.g.Set(value)
g.touch()
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.g.Add(delta)
g.touch()
}
// Timing is a DogStatsD timing, or metrics.Histogram. Observations are
@ -304,3 +329,61 @@ func (h *Histogram) With(labelValues ...string) metrics.Histogram {
func (h *Histogram) Observe(value float64) {
h.obs(h.name, h.lvs, value)
}
type pair struct{ label, value string }
type gaugeNode struct {
mtx sync.RWMutex
gauge *Gauge
children map[pair]*gaugeNode
}
func (n *gaugeNode) addGauge(g *Gauge, lvs lv.LabelValues) *Gauge {
n.mtx.Lock()
defer n.mtx.Unlock()
if len(lvs) == 0 {
if n.gauge == nil {
n.gauge = g
}
return n.gauge
}
if len(lvs) < 2 {
panic("too few LabelValues; programmer error!")
}
head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
if n.children == nil {
n.children = map[pair]*gaugeNode{}
}
child, ok := n.children[head]
if !ok {
child = &gaugeNode{}
n.children[head] = child
}
return child.addGauge(g, tail)
}
func (n *gaugeNode) walk(fn func(string, lv.LabelValues, float64) bool) bool {
n.mtx.RLock()
defer n.mtx.RUnlock()
if n.gauge != nil {
value, ok := n.gauge.read()
if ok && !fn(n.gauge.g.Name, n.gauge.g.LabelValues(), value) {
return false
}
}
for _, child := range n.children {
if !child.walk(fn) {
return false
}
}
return true
}
func (g *Gauge) touch() {
atomic.StoreInt32(&(g.set), 1)
}
func (g *Gauge) read() (float64, bool) {
set := atomic.SwapInt32(&(g.set), 0)
return g.g.Value(), set != 0
}

View file

@ -33,6 +33,7 @@ func NewCounter(name string) *Counter {
// With implements Counter.
func (c *Counter) With(labelValues ...string) metrics.Counter {
return &Counter{
Name: c.Name,
bits: atomic.LoadUint64(&c.bits),
lvs: c.lvs.With(labelValues...),
}
@ -95,6 +96,7 @@ func NewGauge(name string) *Gauge {
// With implements Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{
Name: g.Name,
bits: atomic.LoadUint64(&g.bits),
lvs: g.lvs.With(labelValues...),
}
@ -105,6 +107,20 @@ func (g *Gauge) Set(value float64) {
atomic.StoreUint64(&g.bits, math.Float64bits(value))
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
for {
var (
old = atomic.LoadUint64(&g.bits)
newf = math.Float64frombits(old) + delta
new = math.Float64bits(newf)
)
if atomic.CompareAndSwapUint64(&g.bits, old, new) {
break
}
}
}
// Value returns the current value of the gauge.
func (g *Gauge) Value() float64 {
return math.Float64frombits(atomic.LoadUint64(&g.bits))
@ -121,7 +137,7 @@ func (g *Gauge) LabelValues() []string {
type Histogram struct {
Name string
lvs lv.LabelValues
h gohistogram.Histogram
h *safeHistogram
}
// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A
@ -129,25 +145,30 @@ type Histogram struct {
func NewHistogram(name string, buckets int) *Histogram {
return &Histogram{
Name: name,
h: gohistogram.NewHistogram(buckets),
h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)},
}
}
// With implements Histogram.
func (h *Histogram) With(labelValues ...string) metrics.Histogram {
return &Histogram{
lvs: h.lvs.With(labelValues...),
h: h.h,
Name: h.Name,
lvs: h.lvs.With(labelValues...),
h: h.h,
}
}
// Observe implements Histogram.
func (h *Histogram) Observe(value float64) {
h.h.Lock()
defer h.h.Unlock()
h.h.Add(value)
}
// Quantile returns the value of the quantile q, 0.0 < q < 1.0.
func (h *Histogram) Quantile(q float64) float64 {
h.h.RLock()
defer h.h.RUnlock()
return h.h.Quantile(q)
}
@ -159,9 +180,17 @@ func (h *Histogram) LabelValues() []string {
// Print writes a string representation of the histogram to the passed writer.
// Useful for printing to a terminal.
func (h *Histogram) Print(w io.Writer) {
h.h.RLock()
defer h.h.RUnlock()
fmt.Fprintf(w, h.h.String())
}
// safeHistogram exists as gohistogram.Histogram is not goroutine-safe.
type safeHistogram struct {
sync.RWMutex
gohistogram.Histogram
}
// Bucket is a range in a histogram which aggregates observations.
type Bucket struct {
From, To, Count int64

View file

@ -66,6 +66,7 @@ func (in *Influx) NewGauge(name string) *Gauge {
return &Gauge{
name: name,
obs: in.gauges.Observe,
add: in.gauges.Add,
}
}
@ -168,10 +169,14 @@ func mergeTags(tags map[string]string, labelValues []string) map[string]string {
if len(labelValues)%2 != 0 {
panic("mergeTags received a labelValues with an odd number of strings")
}
for i := 0; i < len(labelValues); i += 2 {
tags[labelValues[i]] = labelValues[i+1]
ret := make(map[string]string, len(tags)+len(labelValues)/2)
for k, v := range tags {
ret[k] = v
}
return tags
for i := 0; i < len(labelValues); i += 2 {
ret[labelValues[i]] = labelValues[i+1]
}
return ret
}
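A small sketch of the intended behavior (mergeTags is unexported, so this only applies from within the package): the copy keeps the caller's tags map pristine across calls, so per-series label values never leak into the shared base tags.

    base := map[string]string{"host": "h1"}
    t1 := mergeTags(base, []string{"code", "200"})
    t2 := mergeTags(base, []string{"code", "500"})
    // base still holds only "host"; t1 and t2 are independent maps.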
func sum(a []float64) float64 {
@ -216,6 +221,7 @@ type Gauge struct {
name string
lvs lv.LabelValues
obs observeFunc
add observeFunc
}
// With implements metrics.Gauge.
@ -224,6 +230,7 @@ func (g *Gauge) With(labelValues ...string) metrics.Gauge {
name: g.name,
lvs: g.lvs.With(labelValues...),
obs: g.obs,
add: g.add,
}
}
@ -232,6 +239,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value)
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.add(g.name, g.lvs, delta)
}
// Histogram is an Influx histogram. Observations are aggregated into a
// generic.Histogram and emitted as per-quantile gauges to the Influx server.
type Histogram struct {

View file

@ -21,6 +21,13 @@ func (s *Space) Observe(name string, lvs LabelValues, value float64) {
s.nodeFor(name).observe(lvs, value)
}
// Add locates the time series identified by the name and label values in
// the vector space, and appends the delta to the last value in the list of
// observations.
func (s *Space) Add(name string, lvs LabelValues, delta float64) {
s.nodeFor(name).add(lvs, delta)
}
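Concretely, a sketch of the difference from Observe (this package is internal, so the calls below only apply from within it): Add extends the series with a running value rather than a raw sample.

    s := lv.NewSpace()
    s.Observe("g", lv.LabelValues{}, 10) // observations: [10]
    s.Add("g", lv.LabelValues{}, 5)      // observations: [10 15]
    s.Add("g", lv.LabelValues{}, -3)     // observations: [10 15 12]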
// Walk traverses the vector space and invokes fn for each non-empty time series
// which is encountered. Return false to abort the traversal.
func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) {
@ -91,6 +98,34 @@ func (n *node) observe(lvs LabelValues, value float64) {
child.observe(tail, value)
}
func (n *node) add(lvs LabelValues, delta float64) {
n.mtx.Lock()
defer n.mtx.Unlock()
if len(lvs) == 0 {
var value float64
if len(n.observations) > 0 {
value = last(n.observations) + delta
} else {
value = delta
}
n.observations = append(n.observations, value)
return
}
if len(lvs) < 2 {
panic("too few LabelValues; programmer error!")
}
head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
if n.children == nil {
n.children = map[pair]*node{}
}
child, ok := n.children[head]
if !ok {
child = &node{}
n.children[head] = child
}
child.add(tail, delta)
}
func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool {
n.mtx.RLock()
defer n.mtx.RUnlock()
@ -104,3 +139,7 @@ func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool
}
return true
}
func last(a []float64) float64 {
return a[len(a)-1]
}

View file

@ -12,6 +12,7 @@ type Counter interface {
type Gauge interface {
With(labelValues ...string) Gauge
Set(value float64)
Add(delta float64)
}
// Histogram describes a metric that takes repeated observations of the same

View file

@ -54,6 +54,13 @@ func (g Gauge) With(labelValues ...string) metrics.Gauge {
return next
}
// Add implements metrics.Gauge.
func (g Gauge) Add(delta float64) {
for _, gauge := range g {
gauge.Add(delta)
}
}
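With Add now part of the Gauge interface, the fan-out covers both operations; a sketch, where the wrapped gauges are hypothetical backend instances:

    g := multi.NewGauge(promGauge, statsdGauge) // both satisfy metrics.Gauge
    g.Set(42) // forwarded to every wrapped gauge
    g.Add(-1) // likewise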
// Histogram collects multiple individual histograms and treats them as a unit.
type Histogram []metrics.Histogram

View file

@ -74,6 +74,7 @@ func (s *Statsd) NewGauge(name string) *Gauge {
return &Gauge{
name: s.prefix + name,
obs: s.gauges.Observe,
add: s.gauges.Add,
}
}
@ -201,6 +202,7 @@ func (c *Counter) Add(delta float64) {
type Gauge struct {
name string
obs observeFunc
add observeFunc
}
// With is a no-op.
@ -213,6 +215,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, lv.LabelValues{}, value)
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.add(g.name, lv.LabelValues{}, delta)
}
// Timing is a StatsD timing, or metrics.Histogram. Observations are
// forwarded to a Statsd object, and collected (but not aggregated) per
// timeseries.

View file

@ -7,6 +7,7 @@ import "time"
type Timer struct {
h Histogram
t time.Time
u time.Duration
}
// NewTimer wraps the given histogram and records the current time.
@ -14,15 +15,22 @@ func NewTimer(h Histogram) *Timer {
return &Timer{
h: h,
t: time.Now(),
u: time.Second,
}
}
// ObserveDuration captures the elapsed time since the timer was
// constructed, in the unit set by Unit (seconds by default), and forwards
// that observation to the histogram.
func (t *Timer) ObserveDuration() {
d := time.Since(t.t).Seconds()
d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
if d < 0 {
d = 0
}
t.h.Observe(d)
}
// Unit sets the unit of the float64 emitted by the timer.
// By default, the timer emits seconds.
func (t *Timer) Unit(u time.Duration) {
t.u = u
}
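A usage sketch for the new unit control (histogram name hypothetical; generic.NewHistogram is shown earlier in this change):

    h := generic.NewHistogram("request_duration_ms", 50)
    t := metrics.NewTimer(h)
    t.Unit(time.Millisecond) // emit milliseconds; the default remains seconds
    // ... do work ...
    t.ObserveDuration()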