Merge tag 'v1.6.0-rc6' into master

This commit is contained in:
Fernandez Ludovic 2018-04-18 10:13:22 +02:00
commit e6ce61fdf0
48 changed files with 1115 additions and 472 deletions

View file

@ -1,5 +1,24 @@
# Change Log # Change Log
## [v1.6.0-rc6](https://github.com/containous/traefik/tree/v1.6.0-rc6) (2018-04-17)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc5...v1.6.0-rc6)
**Enhancements:**
- **[acme]** Create backup file during migration from ACME V1 to ACME V2 ([#3191](https://github.com/containous/traefik/pull/3191) by [nmengin](https://github.com/nmengin))
- **[servicefabric]** Use shared label system ([#3197](https://github.com/containous/traefik/pull/3197) by [ldez](https://github.com/ldez))
**Bug fixes:**
- **[docker]** Fix multiple frontends with docker-compose --scale ([#3190](https://github.com/containous/traefik/pull/3190) by [jbdoumenjou](https://github.com/jbdoumenjou))
- **[metrics]** Fix duplicated tags in InfluxDB ([#3189](https://github.com/containous/traefik/pull/3189) by [mmatur](https://github.com/mmatur))
- **[middleware,tracing]** Fix nil value when tracing is enabled ([#3192](https://github.com/containous/traefik/pull/3192) by [mmatur](https://github.com/mmatur))
- **[middleware]** Fix panic in atomic on ARM and x86-32 platforms ([#3195](https://github.com/containous/traefik/pull/3195) by [mmatur](https://github.com/mmatur))
- **[middleware]** Redirect to HTTPS first before basic auth if header redirect (secure) is set ([#3187](https://github.com/containous/traefik/pull/3187) by [SantoDE](https://github.com/SantoDE))
- **[servicefabric]** Fix backend name for stateful service and more. ([#3183](https://github.com/containous/traefik/pull/3183) by [ldez](https://github.com/ldez))
- Add missing argument in log. ([#3188](https://github.com/containous/traefik/pull/3188) by [chemidy](https://github.com/chemidy))
**Documentation:**
- **[provider]** Fix template version documentation. ([#3184](https://github.com/containous/traefik/pull/3184) by [ldez](https://github.com/ldez))
## [v1.6.0-rc5](https://github.com/containous/traefik/tree/v1.6.0-rc5) (2018-04-12) ## [v1.6.0-rc5](https://github.com/containous/traefik/tree/v1.6.0-rc5) (2018-04-12)
[All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc4...v1.6.0-rc5) [All Commits](https://github.com/containous/traefik/compare/v1.6.0-rc4...v1.6.0-rc5)

11
Gopkg.lock generated
View file

@ -263,8 +263,8 @@
[[projects]] [[projects]]
name = "github.com/containous/traefik-extra-service-fabric" name = "github.com/containous/traefik-extra-service-fabric"
packages = ["."] packages = ["."]
revision = "503022efdc178146d598911092af75690510a80c" revision = "2889df8d4f84315e6e527588554ed0ce9d062305"
version = "v1.1.3" version = "v1.1.5"
[[projects]] [[projects]]
name = "github.com/coreos/bbolt" name = "github.com/coreos/bbolt"
@ -569,8 +569,8 @@
"metrics/statsd", "metrics/statsd",
"util/conn" "util/conn"
] ]
revision = "f66b0e13579bfc5a48b9e2a94b1209c107ea1f41" revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e"
version = "v0.3.0" version = "v0.7.0"
[[projects]] [[projects]]
name = "github.com/go-logfmt/logfmt" name = "github.com/go-logfmt/logfmt"
@ -761,7 +761,6 @@
version = "v1.3.7" version = "v1.3.7"
[[projects]] [[projects]]
branch = "master"
name = "github.com/jjcollinge/servicefabric" name = "github.com/jjcollinge/servicefabric"
packages = ["."] packages = ["."]
revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe" revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe"
@ -1675,6 +1674,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "c441208e9bf330e85e2939b383515f58a4957286960b43c444e6f512d1ff94ee" inputs-digest = "c7d91203842be1915ca08a31917a079489bff7ffc6f2e494330e9556b4730a06"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View file

@ -66,7 +66,7 @@
[[constraint]] [[constraint]]
name = "github.com/containous/traefik-extra-service-fabric" name = "github.com/containous/traefik-extra-service-fabric"
version = "1.1.3" version = "1.1.5"
[[constraint]] [[constraint]]
name = "github.com/coreos/go-systemd" name = "github.com/coreos/go-systemd"
@ -97,7 +97,7 @@
[[constraint]] [[constraint]]
name = "github.com/go-kit/kit" name = "github.com/go-kit/kit"
version = "0.3.0" version = "0.7.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"

View file

@ -46,38 +46,42 @@ func (s *LocalStore) Get() (*Account, error) {
if err := json.Unmarshal(file, &account); err != nil { if err := json.Unmarshal(file, &account); err != nil {
return nil, err return nil, err
} }
// Check if ACME Account is in ACME V1 format
if account != nil && account.Registration != nil {
isOldRegistration, err := regexp.MatchString(acme.RegistrationURLPathV1Regexp, account.Registration.URI)
if err != nil {
return nil, err
}
if isOldRegistration {
account.Email = ""
account.Registration = nil
account.PrivateKey = nil
}
}
} }
return account, nil return account, nil
} }
// RemoveAccountV1Values removes ACME account V1 values
func RemoveAccountV1Values(account *Account) error {
	// Nothing to do without an account or a registration to inspect.
	if account == nil || account.Registration == nil {
		return nil
	}

	// Detect a registration URI issued by the ACME V1 endpoint.
	matched, err := regexp.MatchString(acme.RegistrationURLPathV1Regexp, account.Registration.URI)
	if err != nil {
		return err
	}

	if matched {
		// Drop the V1 credentials so a fresh V2 registration is performed.
		account.Email = ""
		account.Registration = nil
		account.PrivateKey = nil
	}
	return nil
}
// ConvertToNewFormat converts old acme.json format to the new one and store the result into the file (used for the backward compatibility) // ConvertToNewFormat converts old acme.json format to the new one and store the result into the file (used for the backward compatibility)
func ConvertToNewFormat(fileName string) { func ConvertToNewFormat(fileName string) {
localStore := acme.NewLocalStore(fileName) localStore := acme.NewLocalStore(fileName)
storeAccount, err := localStore.GetAccount() storeAccount, err := localStore.GetAccount()
if err != nil { if err != nil {
log.Warnf("Failed to read new account, ACME data conversion is not available : %v", err) log.Errorf("Failed to read new account, ACME data conversion is not available : %v", err)
return return
} }
storeCertificates, err := localStore.GetCertificates() storeCertificates, err := localStore.GetCertificates()
if err != nil { if err != nil {
log.Warnf("Failed to read new certificates, ACME data conversion is not available : %v", err) log.Errorf("Failed to read new certificates, ACME data conversion is not available : %v", err)
return return
} }
@ -86,13 +90,25 @@ func ConvertToNewFormat(fileName string) {
account, err := localStore.Get() account, err := localStore.Get()
if err != nil { if err != nil {
log.Warnf("Failed to read old account, ACME data conversion is not available : %v", err) log.Errorf("Failed to read old account, ACME data conversion is not available : %v", err)
return return
} }
// Convert ACME data from old to new format // Convert ACME data from old to new format
newAccount := &acme.Account{} newAccount := &acme.Account{}
if account != nil && len(account.Email) > 0 { if account != nil && len(account.Email) > 0 {
err = backupACMEFile(fileName, account)
if err != nil {
log.Errorf("Unable to create a backup for the V1 formatted ACME file: %s", err.Error())
return
}
err = RemoveAccountV1Values(account)
if err != nil {
log.Errorf("Unable to remove ACME Account V1 values: %s", err.Error())
return
}
newAccount = &acme.Account{ newAccount = &acme.Account{
PrivateKey: account.PrivateKey, PrivateKey: account.PrivateKey,
Registration: account.Registration, Registration: account.Registration,
@ -107,8 +123,8 @@ func ConvertToNewFormat(fileName string) {
Domain: cert.Domains, Domain: cert.Domains,
}) })
} }
// If account is in the old format, storeCertificates is nil or empty
// and has to be initialized // If account is in the old format, storeCertificates is nil or empty and has to be initialized
storeCertificates = newCertificates storeCertificates = newCertificates
} }
@ -119,7 +135,16 @@ func ConvertToNewFormat(fileName string) {
} }
} }
// FromNewToOldFormat converts new acme.json format to the old one (used for the backward compatibility) func backupACMEFile(originalFileName string, account interface{}) error {
// write account to file
data, err := json.MarshalIndent(account, "", " ")
if err != nil {
return err
}
return ioutil.WriteFile(originalFileName+".bak", data, 0600)
}
// FromNewToOldFormat converts new acme account to the old one (used for the backward compatibility)
func FromNewToOldFormat(fileName string) (*Account, error) { func FromNewToOldFormat(fileName string) (*Account, error) {
localStore := acme.NewLocalStore(fileName) localStore := acme.NewLocalStore(fileName)

View file

@ -134,10 +134,16 @@ func migrateACMEData(fileName string) (*acme.Account, error) {
if accountFromNewFormat == nil { if accountFromNewFormat == nil {
// convert ACME json file to KV store (used for backward compatibility) // convert ACME json file to KV store (used for backward compatibility)
localStore := acme.NewLocalStore(fileName) localStore := acme.NewLocalStore(fileName)
account, err = localStore.Get() account, err = localStore.Get()
if err != nil { if err != nil {
return nil, err return nil, err
} }
err = acme.RemoveAccountV1Values(account)
if err != nil {
return nil, err
}
} else { } else {
account = accountFromNewFormat account = accountFromNewFormat
} }

View file

@ -11,6 +11,8 @@ import (
"github.com/containous/traefik/api" "github.com/containous/traefik/api"
"github.com/containous/traefik/log" "github.com/containous/traefik/log"
"github.com/containous/traefik/middlewares/tracing" "github.com/containous/traefik/middlewares/tracing"
"github.com/containous/traefik/middlewares/tracing/jaeger"
"github.com/containous/traefik/middlewares/tracing/zipkin"
"github.com/containous/traefik/ping" "github.com/containous/traefik/ping"
acmeprovider "github.com/containous/traefik/provider/acme" acmeprovider "github.com/containous/traefik/provider/acme"
"github.com/containous/traefik/provider/boltdb" "github.com/containous/traefik/provider/boltdb"
@ -313,6 +315,43 @@ func (gc *GlobalConfiguration) SetEffectiveConfiguration(configFile string) {
} }
gc.initACMEProvider() gc.initACMEProvider()
gc.initTracing()
}
func (gc *GlobalConfiguration) initTracing() {
if gc.Tracing != nil {
switch gc.Tracing.Backend {
case jaeger.Name:
if gc.Tracing.Jaeger == nil {
gc.Tracing.Jaeger = &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
}
}
if gc.Tracing.Zipkin != nil {
log.Warn("Zipkin configuration will be ignored")
gc.Tracing.Zipkin = nil
}
case zipkin.Name:
if gc.Tracing.Zipkin == nil {
gc.Tracing.Zipkin = &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
}
}
if gc.Tracing.Jaeger != nil {
log.Warn("Jaeger configuration will be ignored")
gc.Tracing.Jaeger = nil
}
default:
log.Warnf("Unknown tracer %q", gc.Tracing.Backend)
return
}
}
} }
func (gc *GlobalConfiguration) initACMEProvider() { func (gc *GlobalConfiguration) initACMEProvider() {

View file

@ -5,14 +5,18 @@ import (
"time" "time"
"github.com/containous/flaeg" "github.com/containous/flaeg"
"github.com/containous/traefik/middlewares/tracing"
"github.com/containous/traefik/middlewares/tracing/jaeger"
"github.com/containous/traefik/middlewares/tracing/zipkin"
"github.com/containous/traefik/provider" "github.com/containous/traefik/provider"
"github.com/containous/traefik/provider/file" "github.com/containous/traefik/provider/file"
"github.com/stretchr/testify/assert"
) )
const defaultConfigFile = "traefik.toml" const defaultConfigFile = "traefik.toml"
func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) { func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
tests := []struct { testCases := []struct {
desc string desc string
legacyGraceTimeout time.Duration legacyGraceTimeout time.Duration
lifeCycleGraceTimeout time.Duration lifeCycleGraceTimeout time.Duration
@ -37,10 +41,11 @@ func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
}, },
} }
for _, test := range tests { for _, test := range testCases {
test := test test := test
t.Run(test.desc, func(t *testing.T) { t.Run(test.desc, func(t *testing.T) {
t.Parallel() t.Parallel()
gc := &GlobalConfiguration{ gc := &GlobalConfiguration{
GraceTimeOut: flaeg.Duration(test.legacyGraceTimeout), GraceTimeOut: flaeg.Duration(test.legacyGraceTimeout),
} }
@ -52,17 +57,14 @@ func TestSetEffectiveConfigurationGraceTimeout(t *testing.T) {
gc.SetEffectiveConfiguration(defaultConfigFile) gc.SetEffectiveConfiguration(defaultConfigFile)
gotGraceTimeout := time.Duration(gc.LifeCycle.GraceTimeOut) assert.Equal(t, test.wantGraceTimeout, time.Duration(gc.LifeCycle.GraceTimeOut))
if gotGraceTimeout != test.wantGraceTimeout {
t.Fatalf("got effective grace timeout %d, want %d", gotGraceTimeout, test.wantGraceTimeout)
}
}) })
} }
} }
func TestSetEffectiveConfigurationFileProviderFilename(t *testing.T) { func TestSetEffectiveConfigurationFileProviderFilename(t *testing.T) {
tests := []struct { testCases := []struct {
desc string desc string
fileProvider *file.Provider fileProvider *file.Provider
wantFileProviderFilename string wantFileProviderFilename string
@ -84,20 +86,128 @@ func TestSetEffectiveConfigurationFileProviderFilename(t *testing.T) {
}, },
} }
for _, test := range tests { for _, test := range testCases {
test := test test := test
t.Run(test.desc, func(t *testing.T) { t.Run(test.desc, func(t *testing.T) {
t.Parallel() t.Parallel()
gc := &GlobalConfiguration{ gc := &GlobalConfiguration{
File: test.fileProvider, File: test.fileProvider,
} }
gc.SetEffectiveConfiguration(defaultConfigFile) gc.SetEffectiveConfiguration(defaultConfigFile)
gotFileProviderFilename := gc.File.Filename assert.Equal(t, test.wantFileProviderFilename, gc.File.Filename)
if gotFileProviderFilename != test.wantFileProviderFilename { })
t.Fatalf("got file provider file name %q, want %q", gotFileProviderFilename, test.wantFileProviderFilename) }
} }
// TestSetEffectiveConfigurationTracing checks that SetEffectiveConfiguration
// (via initTracing) applies backend defaults and discards the configuration
// of the non-selected tracing backend.
func TestSetEffectiveConfigurationTracing(t *testing.T) {
testCases := []struct {
desc string
tracing *tracing.Tracing
expected *tracing.Tracing
}{
{
desc: "no tracing configuration",
tracing: &tracing.Tracing{},
expected: &tracing.Tracing{},
},
{
// Unknown backend names are left as-is (only a warning is logged).
desc: "tracing bad backend name",
tracing: &tracing.Tracing{
Backend: "powpow",
},
expected: &tracing.Tracing{
Backend: "powpow",
},
},
{
// Selecting jaeger fills jaeger defaults and drops the zipkin config.
desc: "tracing jaeger backend name",
tracing: &tracing.Tracing{
Backend: "jaeger",
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
},
},
expected: &tracing.Tracing{
Backend: "jaeger",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
Zipkin: nil,
},
},
{
// Selecting zipkin fills zipkin defaults and drops the jaeger config.
desc: "tracing zipkin backend name",
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
},
expected: &tracing.Tracing{
Backend: "zipkin",
Jaeger: nil,
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://localhost:9411/api/v1/spans",
SameSpan: false,
ID128Bit: true,
Debug: false,
},
},
},
{
// Explicit user values for the selected backend must not be overwritten.
desc: "tracing zipkin backend name value override",
tracing: &tracing.Tracing{
Backend: "zipkin",
Jaeger: &jaeger.Config{
SamplingServerURL: "http://localhost:5778/sampling",
SamplingType: "const",
SamplingParam: 1.0,
LocalAgentHostPort: "127.0.0.1:6832",
},
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://powpow:9411/api/v1/spans",
SameSpan: true,
ID128Bit: true,
Debug: true,
},
},
expected: &tracing.Tracing{
Backend: "zipkin",
Jaeger: nil,
Zipkin: &zipkin.Config{
HTTPEndpoint: "http://powpow:9411/api/v1/spans",
SameSpan: true,
ID128Bit: true,
Debug: true,
},
},
},
}
for _, test := range testCases {
// Shadow the loop variable so each parallel subtest sees its own copy.
test := test
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
gc := &GlobalConfiguration{
Tracing: test.tracing,
}
gc.SetEffectiveConfiguration(defaultConfigFile)
assert.Equal(t, test.expected, gc.Tracing)
}) })
} }
} }

View file

@ -543,3 +543,14 @@ Do not hesitate to complete it.
| [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | Not tested yet | | [RFC2136](https://tools.ietf.org/html/rfc2136) | `rfc2136` | Not tested yet |
| [Route 53](https://aws.amazon.com/route53/) | `route53` | YES | | [Route 53](https://aws.amazon.com/route53/) | `route53` | YES |
| [VULTR](https://www.vultr.com) | `vultr` | Not tested yet | | [VULTR](https://www.vultr.com) | `vultr` | Not tested yet |
## ACME V2 migration
During migration from ACME V1 to ACME V2 with a storage file, a backup is created with the content of the ACME V1 file.
To obtain the name of the backup file, Træfik concatenates the option `acme.storage` and the suffix `.bak`.
For example : if `acme.storage` value is `/etc/traefik/acme/acme.json`, the backup file will be named `/etc/traefik/acme/acme.json.bak`.
!!! note
When Træfik is launched in a container, do not forget to create a volume of the parent folder to get the backup file on the host.
Otherwise, the backup file will be deleted when the container will be stopped and Træfik will not generate it again.

View file

@ -73,7 +73,7 @@ prefix = "traefik"
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
``` ```
This backend will create routes matching on hostname based on the service name used in Consul. This backend will create routes matching on hostname based on the service name used in Consul.

View file

@ -46,7 +46,7 @@ watch = true
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
# Expose containers by default in Traefik. # Expose containers by default in Traefik.
# If set to false, containers that don't have `traefik.enable=true` will be ignored. # If set to false, containers that don't have `traefik.enable=true` will be ignored.
@ -139,7 +139,7 @@ swarmMode = true
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
# Expose services by default in Traefik. # Expose services by default in Traefik.
# #

View file

@ -92,7 +92,7 @@ secretAccessKey = "123"
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
``` ```
If `accessKeyID`/`secretAccessKey` is not given credentials will be resolved in the following order: If `accessKeyID`/`secretAccessKey` is not given credentials will be resolved in the following order:

View file

@ -52,7 +52,7 @@ domain = "marathon.localhost"
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
# Expose Marathon apps by default in Traefik. # Expose Marathon apps by default in Traefik.
# #

View file

@ -55,7 +55,7 @@ domain = "mesos.localhost"
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
# TLS client configuration. https://golang.org/pkg/crypto/tls/#Config # TLS client configuration. https://golang.org/pkg/crypto/tls/#Config
# #

View file

@ -61,7 +61,7 @@ enableServiceHealthFilter = true
# - "1": previous template version (must be used only with older custom templates, see "filename") # - "1": previous template version (must be used only with older custom templates, see "filename")
# - "2": current template version (must be used to force template version when "filename" is used) # - "2": current template version (must be used to force template version when "filename" is used)
# #
# templateVersion = "2" # templateVersion = 2
``` ```
To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific). To enable constraints see [backend-specific constraints section](/configuration/commons/#backend-specific).

View file

@ -0,0 +1,78 @@
package integration
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/containous/traefik/integration/try"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/go-check/check"
checker "github.com/vdemeester/shakers"
)
// Name of the docker-compose project fixture (resources/compose/minimal.yml)
// used by this suite.
const (
composeProject = "minimal"
)
// Docker test suites
// DockerComposeSuite runs Traefik against a docker-compose managed
// environment; BaseSuite supplies the shared compose-project helpers.
type DockerComposeSuite struct {
BaseSuite
}
// SetUpSuite creates and starts the docker-compose project once for the
// whole suite.
func (s *DockerComposeSuite) SetUpSuite(c *check.C) {
s.createComposeProject(c, composeProject)
s.composeProject.Start(c)
}
// TearDownSuite stops the compose project started in SetUpSuite; the nil
// check guards against SetUpSuite having failed before creating it.
func (s *DockerComposeSuite) TearDownSuite(c *check.C) {
// shutdown and delete compose project
if s.composeProject != nil {
s.composeProject.Stop(c)
}
}
// TestComposeScale scales the whoami1 compose service to two replicas and
// checks that Traefik's docker provider exposes one backend with two servers
// and a single frontend (i.e. scaling does not duplicate frontends).
func (s *DockerComposeSuite) TestComposeScale(c *check.C) {
var serviceCount = 2
var composeService = "whoami1"
s.composeProject.Scale(c, composeService, serviceCount)
file := s.adaptFileForHost(c, "fixtures/docker/simple.toml")
defer os.Remove(file)
cmd, display := s.traefikCmd(withConfigFile(file))
defer display(c)
err := cmd.Start()
c.Assert(err, checker.IsNil)
defer cmd.Process.Kill()
// Wait until Traefik actually routes the request before querying the API.
req := testhelpers.MustNewRequest(http.MethodGet, "http://127.0.0.1:8000/whoami", nil)
req.Host = "my.super.host"
_, err = try.ResponseUntilStatusCode(req, 1500*time.Millisecond, http.StatusOK)
c.Assert(err, checker.IsNil)
// Read the generated configuration back from the provider API.
resp, err := http.Get("http://127.0.0.1:8080/api/providers/docker")
c.Assert(err, checker.IsNil)
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, checker.IsNil)
var provider types.Configuration
c.Assert(json.Unmarshal(body, &provider), checker.IsNil)
// check that we have only one backend with n servers
c.Assert(provider.Backends, checker.HasLen, 1)
// NOTE(review): backend key format assumed to match the provider's
// "backend-<service>-integrationtest<project>" naming — confirm against
// the docker provider's label system if this lookup starts failing.
myBackend := provider.Backends["backend-"+composeService+"-integrationtest"+composeProject]
c.Assert(myBackend, checker.NotNil)
c.Assert(myBackend.Servers, checker.HasLen, serviceCount)
// check that we have only one frontend
c.Assert(provider.Frontends, checker.HasLen, 1)
}

View file

@ -99,7 +99,7 @@ func (s *DockerSuite) TestSimpleConfiguration(c *check.C) {
defer cmd.Process.Kill() defer cmd.Process.Kill()
// TODO validate : run on 80 // TODO validate : run on 80
// Expected a 404 as we did not comfigure anything // Expected a 404 as we did not configure anything
err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound)) err = try.GetRequest("http://127.0.0.1:8000/", 500*time.Millisecond, try.StatusCodeIs(http.StatusNotFound))
c.Assert(err, checker.IsNil) c.Assert(err, checker.IsNil)
} }

View file

@ -41,6 +41,7 @@ func init() {
check.Suite(&ConstraintSuite{}) check.Suite(&ConstraintSuite{})
check.Suite(&ConsulCatalogSuite{}) check.Suite(&ConsulCatalogSuite{})
check.Suite(&ConsulSuite{}) check.Suite(&ConsulSuite{})
check.Suite(&DockerComposeSuite{})
check.Suite(&DockerSuite{}) check.Suite(&DockerSuite{})
check.Suite(&DynamoDBSuite{}) check.Suite(&DynamoDBSuite{})
check.Suite(&EtcdSuite{}) check.Suite(&EtcdSuite{})

View file

@ -0,0 +1,4 @@
# Minimal compose fixture for DockerComposeSuite: a single whoami service
# whose label routes PathPrefix:/whoami through Traefik's docker provider.
whoami1:
image: emilevauge/whoami
labels:
- traefik.frontend.rule=PathPrefix:/whoami

View file

@ -24,21 +24,21 @@ func TestInfluxDB(t *testing.T) {
} }
expectedBackend := []string{ expectedBackend := []string{
`(traefik\.backend\.requests\.total,code=200,method=GET,service=test count=1) [\d]{19}`, `(traefik\.backend\.requests\.total,backend=test,code=200,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.requests\.total,code=404,method=GET,service=test count=1) [\d]{19}`, `(traefik\.backend\.requests\.total,backend=test,code=404,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.request\.duration(?:,backend=test)?,code=200,method=GET,service=test(?:,url=http://127.0.0.1)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`, `(traefik\.backend\.request\.duration,backend=test,code=200 p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,service=test count=2) [\d]{19}`, `(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,backend=test count=2) [\d]{19}`,
`(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`, `(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`, `(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`, `(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
} }
msgBackend := udp.ReceiveString(t, func() { msgBackend := udp.ReceiveString(t, func() {
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1) influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1) influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1) influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1) influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000) influxDBRegistry.BackendReqDurationHistogram().With("backend", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
influxDBRegistry.ConfigReloadsCounter().Add(1) influxDBRegistry.ConfigReloadsCounter().Add(1)
influxDBRegistry.ConfigReloadsFailureCounter().Add(1) influxDBRegistry.ConfigReloadsFailureCounter().Add(1)
influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1) influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1)
@ -47,9 +47,9 @@ func TestInfluxDB(t *testing.T) {
assertMessage(t, msgBackend, expectedBackend) assertMessage(t, msgBackend, expectedBackend)
expectedEntrypoint := []string{ expectedEntrypoint := []string{
`(traefik\.entrypoint\.requests\.total(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`, `(traefik\.entrypoint\.requests\.total,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
`(traefik\.entrypoint\.request\.duration(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`, `(traefik\.entrypoint\.request\.duration(?:,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.entrypoint\.connections\.open(?:[a-z=0-9A-Z,]+)?,entrypoint=test,(?:[a-z=0-9A-Z,:.//]+)? value=1) [\d]{19}`, `(traefik\.entrypoint\.connections\.open,entrypoint=test value=1) [\d]{19}`,
} }
msgEntrypoint := udp.ReceiveString(t, func() { msgEntrypoint := udp.ReceiveString(t, func() {

View file

@ -335,6 +335,12 @@ func (g *gauge) With(labelValues ...string) metrics.Gauge {
} }
} }
// Add increments the gauge by delta and forwards the updated collector on
// the collectors channel (mirrors the existing Set method below).
// NOTE(review): assumes g.collectors is buffered or actively drained —
// confirm, otherwise this send could block the caller.
func (g *gauge) Add(delta float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Add(delta)
g.collectors <- newCollector(g.name, g.labelNamesValues, collector)
}
func (g *gauge) Set(value float64) { func (g *gauge) Set(value float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels()) collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Set(value) collector.Set(value)

View file

@ -41,11 +41,13 @@ func NewBackendMetricsMiddleware(registry metrics.Registry, backendName string)
} }
type metricsMiddleware struct { type metricsMiddleware struct {
// Important: Since this int64 field is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platform
// See: https://golang.org/pkg/sync/atomic/ for more information
openConns int64
reqsCounter gokitmetrics.Counter reqsCounter gokitmetrics.Counter
reqDurationHistogram gokitmetrics.Histogram reqDurationHistogram gokitmetrics.Histogram
openConnsGauge gokitmetrics.Gauge openConnsGauge gokitmetrics.Gauge
baseLabels []string baseLabels []string
openConns int64
} }
func (m *metricsMiddleware) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { func (m *metricsMiddleware) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {

View file

@ -48,7 +48,7 @@ func (c *Config) Setup(componentName string) (opentracing.Tracer, io.Closer, err
log.Warnf("Could not initialize jaeger tracer: %s", err.Error()) log.Warnf("Could not initialize jaeger tracer: %s", err.Error())
return nil, nil, err return nil, nil, err
} }
log.Debugf("jaeger tracer configured", err) log.Debug("Jaeger tracer configured")
return opentracing.GlobalTracer(), closer, nil return opentracing.GlobalTracer(), closer, nil
} }

View file

@ -3,6 +3,7 @@ package zipkin
import ( import (
"io" "io"
"github.com/containous/traefik/log"
opentracing "github.com/opentracing/opentracing-go" opentracing "github.com/opentracing/opentracing-go"
zipkin "github.com/openzipkin/zipkin-go-opentracing" zipkin "github.com/openzipkin/zipkin-go-opentracing"
) )
@ -39,5 +40,7 @@ func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error
// Without this, child spans are getting the NOOP tracer // Without this, child spans are getting the NOOP tracer
opentracing.SetGlobalTracer(tracer) opentracing.SetGlobalTracer(tracer)
log.Debug("Zipkin tracer configured")
return tracer, collector, nil return tracer, collector, nil
} }

View file

@ -52,6 +52,7 @@ func (s *LocalStore) get() (*StoredData, error) {
return nil, err return nil, err
} }
} }
// Check if ACME Account is in ACME V1 format // Check if ACME Account is in ACME V1 format
if s.storedData.Account != nil && s.storedData.Account.Registration != nil { if s.storedData.Account != nil && s.storedData.Account.Registration != nil {
isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI) isOldRegistration, err := regexp.MatchString(RegistrationURLPathV1Regexp, s.storedData.Account.Registration.URI)
@ -63,6 +64,21 @@ func (s *LocalStore) get() (*StoredData, error) {
s.SaveDataChan <- s.storedData s.SaveDataChan <- s.storedData
} }
} }
// Delete all certificates with no value
var certificates []*Certificate
for _, certificate := range s.storedData.Certificates {
if len(certificate.Certificate) == 0 || len(certificate.Key) == 0 {
log.Debugf("Delete certificate %v for domains %v which have no value.", certificate, certificate.Domain.ToStrArray())
continue
}
certificates = append(certificates, certificate)
}
if len(certificates) < len(s.storedData.Certificates) {
s.storedData.Certificates = certificates
s.SaveDataChan <- s.storedData
}
} }
} }

View file

@ -41,7 +41,7 @@ type Configuration struct {
Storage string `description:"Storage to use."` Storage string `description:"Storage to use."`
EntryPoint string `description:"EntryPoint to use."` EntryPoint string `description:"EntryPoint to use."`
OnHostRule bool `description:"Enable certificate generation on frontends Host rules."` OnHostRule bool `description:"Enable certificate generation on frontends Host rules."`
OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` //deprecated OnDemand bool `description:"Enable on demand certificate generation. This will request a certificate from Let's Encrypt during the first TLS handshake for a hostname that does not yet have a certificate."` // Deprecated
DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"` DNSChallenge *DNSChallenge `description:"Activate DNS-01 Challenge"`
HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"` HTTPChallenge *HTTPChallenge `description:"Activate HTTP-01 Challenge"`
Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"` Domains []types.Domain `description:"CN and SANs (alternative domains) to each main domain using format: --acme.domains='main.com,san1.com,san2.com' --acme.domains='*.main.net'. No SANs for wildcards domain. Wildcard domains only accepted with DNSChallenge"`
@ -225,11 +225,17 @@ func (p *Provider) resolveCertificate(domain types.Domain, domainFromConfigurati
} }
bundle := true bundle := true
certificate, failures := client.ObtainCertificate(uncheckedDomains, bundle, nil, OSCPMustStaple) certificate, failures := client.ObtainCertificate(uncheckedDomains, bundle, nil, OSCPMustStaple)
if len(failures) > 0 { if len(failures) > 0 {
return nil, fmt.Errorf("cannot obtain certificates %+v", failures) return nil, fmt.Errorf("cannot obtain certificates %+v", failures)
} }
log.Debugf("Certificates obtained for domain %+v", uncheckedDomains)
if len(certificate.Certificate) == 0 || len(certificate.PrivateKey) == 0 {
return nil, fmt.Errorf("domains %v generate certificate with no value: %v", uncheckedDomains, certificate)
}
log.Debugf("Certificates obtained for domains %+v", uncheckedDomains)
if len(uncheckedDomains) > 1 { if len(uncheckedDomains) > 1 {
domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]} domain = types.Domain{Main: uncheckedDomains[0], SANs: uncheckedDomains[1:]}
} else { } else {
@ -446,16 +452,25 @@ func (p *Provider) renewCertificates() {
log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err) log.Infof("Error renewing certificate from LE : %+v, %v", certificate.Domain, err)
continue continue
} }
log.Infof("Renewing certificate from LE : %+v", certificate.Domain) log.Infof("Renewing certificate from LE : %+v", certificate.Domain)
renewedCert, err := client.RenewCertificate(acme.CertificateResource{ renewedCert, err := client.RenewCertificate(acme.CertificateResource{
Domain: certificate.Domain.Main, Domain: certificate.Domain.Main,
PrivateKey: certificate.Key, PrivateKey: certificate.Key,
Certificate: certificate.Certificate, Certificate: certificate.Certificate,
}, true, OSCPMustStaple) }, true, OSCPMustStaple)
if err != nil { if err != nil {
log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err) log.Errorf("Error renewing certificate from LE: %v, %v", certificate.Domain, err)
continue continue
} }
if len(renewedCert.Certificate) == 0 || len(renewedCert.PrivateKey) == 0 {
log.Errorf("domains %v renew certificate with no value: %v", certificate.Domain.ToStrArray(), certificate)
continue
}
p.addCertificateForDomain(certificate.Domain, renewedCert.Certificate, renewedCert.PrivateKey) p.addCertificateForDomain(certificate.Domain, renewedCert.Certificate, renewedCert.PrivateKey)
} }
} }
@ -473,6 +488,7 @@ func (p *Provider) AddRoutes(router *mux.Router) {
log.Debugf("Unable to split host and port: %v. Fallback to request host.", err) log.Debugf("Unable to split host and port: %v. Fallback to request host.", err)
domain = req.Host domain = req.Host
} }
tokenValue := getTokenValue(token, domain, p.Store) tokenValue := getTokenValue(token, domain, p.Store)
if len(tokenValue) > 0 { if len(tokenValue) > 0 {
rw.WriteHeader(http.StatusOK) rw.WriteHeader(http.StatusOK)

View file

@ -67,12 +67,13 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
container.SegmentLabels = labels container.SegmentLabels = labels
container.SegmentName = segmentName container.SegmentName = segmentName
// Frontends serviceNamesKey := getServiceNameKey(container, p.SwarmMode, segmentName)
if _, exists := serviceNames[container.ServiceName+segmentName]; !exists {
if _, exists := serviceNames[serviceNamesKey]; !exists {
frontendName := p.getFrontendName(container, idx) frontendName := p.getFrontendName(container, idx)
frontends[frontendName] = append(frontends[frontendName], container) frontends[frontendName] = append(frontends[frontendName], container)
if len(container.ServiceName+segmentName) > 0 { if len(serviceNamesKey) > 0 {
serviceNames[container.ServiceName+segmentName] = struct{}{} serviceNames[serviceNamesKey] = struct{}{}
} }
} }
@ -104,6 +105,16 @@ func (p *Provider) buildConfigurationV2(containersInspected []dockerData) *types
return configuration return configuration
} }
func getServiceNameKey(container dockerData, swarmMode bool, segmentName string) string {
serviceNameKey := container.ServiceName
if values, err := label.GetStringMultipleStrict(container.Labels, labelDockerComposeProject, labelDockerComposeService); !swarmMode && err == nil {
serviceNameKey = values[labelDockerComposeService] + values[labelDockerComposeProject]
}
return serviceNameKey + segmentName
}
func (p *Provider) containerFilter(container dockerData) bool { func (p *Provider) containerFilter(container dockerData) bool {
if !label.IsEnabled(container.Labels, p.ExposedByDefault) { if !label.IsEnabled(container.Labels, p.ExposedByDefault) {
log.Debugf("Filtering disabled container %s", container.Name) log.Debugf("Filtering disabled container %s", container.Name)

View file

@ -311,6 +311,95 @@ func TestDockerBuildConfiguration(t *testing.T) {
}, },
}, },
}, },
{
desc: "when docker compose scale with different compose service names",
containers: []docker.ContainerJSON{
containerJSON(
name("test_0"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.1")),
),
containerJSON(
name("test_1"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.2")),
),
containerJSON(
name("test_2"),
labels(map[string]string{
labelDockerComposeProject: "myProject",
labelDockerComposeService: "myService2",
}),
ports(nat.PortMap{
"80/tcp": {},
}),
withNetwork("bridge", ipv4("127.0.0.3")),
),
},
expectedFrontends: map[string]*types.Frontend{
"frontend-Host-myService-myProject-docker-localhost-0": {
Backend: "backend-myService-myProject",
PassHostHeader: true,
EntryPoints: []string{},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-Host-myService-myProject-docker-localhost-0": {
Rule: "Host:myService.myProject.docker.localhost",
},
},
},
"frontend-Host-myService2-myProject-docker-localhost-2": {
Backend: "backend-myService2-myProject",
PassHostHeader: true,
EntryPoints: []string{},
BasicAuth: []string{},
Routes: map[string]types.Route{
"route-frontend-Host-myService2-myProject-docker-localhost-2": {
Rule: "Host:myService2.myProject.docker.localhost",
},
},
},
},
expectedBackends: map[string]*types.Backend{
"backend-myService-myProject": {
Servers: map[string]types.Server{
"server-test-0": {
URL: "http://127.0.0.1:80",
Weight: label.DefaultWeight,
}, "server-test-1": {
URL: "http://127.0.0.2:80",
Weight: label.DefaultWeight,
},
},
CircuitBreaker: nil,
},
"backend-myService2-myProject": {
Servers: map[string]types.Server{
"server-test-2": {
URL: "http://127.0.0.3:80",
Weight: label.DefaultWeight,
},
},
CircuitBreaker: nil,
},
},
},
} }
for _, test := range testCases { for _, test := range testCases {

View file

@ -125,11 +125,14 @@ func (p *Provider) buildConfigurationV1(containersInspected []dockerData) *types
servers := map[string][]dockerData{} servers := map[string][]dockerData{}
serviceNames := make(map[string]struct{}) serviceNames := make(map[string]struct{})
for idx, container := range filteredContainers { for idx, container := range filteredContainers {
if _, exists := serviceNames[container.ServiceName]; !exists {
serviceNamesKey := getServiceNameKey(container, p.SwarmMode, "")
if _, exists := serviceNames[serviceNamesKey]; !exists {
frontendName := p.getFrontendNameV1(container, idx) frontendName := p.getFrontendNameV1(container, idx)
frontends[frontendName] = append(frontends[frontendName], container) frontends[frontendName] = append(frontends[frontendName], container)
if len(container.ServiceName) > 0 { if len(serviceNamesKey) > 0 {
serviceNames[container.ServiceName] = struct{}{} serviceNames[serviceNamesKey] = struct{}{}
} }
} }
backendName := getBackendNameV1(container) backendName := getBackendNameV1(container)

View file

@ -1096,7 +1096,7 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
errorPageName, frontendName, errorPage.Backend) errorPageName, frontendName, errorPage.Backend)
} else if config.Backends[errorPage.Backend] == nil { } else if config.Backends[errorPage.Backend] == nil {
log.Errorf("Error when creating error page %q for frontend %q: the backend %q doesn't exist.", log.Errorf("Error when creating error page %q for frontend %q: the backend %q doesn't exist.",
errorPageName, errorPage.Backend) errorPageName, frontendName, errorPage.Backend)
} else { } else {
errorPagesHandler, err := errorpages.NewHandler(errorPage, entryPointName+providerName+errorPage.Backend) errorPagesHandler, err := errorpages.NewHandler(errorPage, entryPointName+providerName+errorPage.Backend)
if err != nil { if err != nil {
@ -1174,6 +1174,16 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
} }
} }
if headerMiddleware != nil {
log.Debugf("Adding header middleware for frontend %s", frontendName)
n.Use(s.tracingMiddleware.NewNegroniHandlerWrapper("Header", headerMiddleware, false))
}
if secureMiddleware != nil {
log.Debugf("Adding secure middleware for frontend %s", frontendName)
n.UseFunc(secureMiddleware.HandlerFuncWithNextForRequestOnly)
}
if len(frontend.BasicAuth) > 0 { if len(frontend.BasicAuth) > 0 {
users := types.Users{} users := types.Users{}
for _, user := range frontend.BasicAuth { for _, user := range frontend.BasicAuth {
@ -1192,16 +1202,6 @@ func (s *Server) loadConfig(configurations types.Configurations, globalConfigura
} }
} }
if headerMiddleware != nil {
log.Debugf("Adding header middleware for frontend %s", frontendName)
n.Use(s.tracingMiddleware.NewNegroniHandlerWrapper("Header", headerMiddleware, false))
}
if secureMiddleware != nil {
log.Debugf("Adding secure middleware for frontend %s", frontendName)
n.UseFunc(secureMiddleware.HandlerFuncWithNextForRequestOnly)
}
if config.Backends[frontend.Backend].Buffering != nil { if config.Backends[frontend.Backend].Buffering != nil {
bufferedLb, err := s.buildBufferingMiddleware(lb, config.Backends[frontend.Backend].Buffering) bufferedLb, err := s.buildBufferingMiddleware(lb, config.Backends[frontend.Backend].Buffering)

View file

@ -32,7 +32,12 @@ func (g *CollectingGauge) With(labelValues ...string) metrics.Gauge {
} }
// Set is there to satisfy the metrics.Gauge interface. // Set is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Set(delta float64) { func (g *CollectingGauge) Set(value float64) {
g.GaugeValue = value
}
// Add is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Add(delta float64) {
g.GaugeValue = delta g.GaugeValue = delta
} }

View file

@ -1,7 +1,10 @@
package servicefabric package servicefabric
import ( import (
"encoding/json"
"errors"
"net/http" "net/http"
"strings"
"time" "time"
"github.com/cenk/backoff" "github.com/cenk/backoff"
@ -18,6 +21,11 @@ var _ provider.Provider = (*Provider)(nil)
const traefikServiceFabricExtensionKey = "Traefik" const traefikServiceFabricExtensionKey = "Traefik"
const (
kindStateful = "Stateful"
kindStateless = "Stateless"
)
// Provider holds for configuration for the provider // Provider holds for configuration for the provider
type Provider struct { type Provider struct {
provider.BaseProvider `mapstructure:",squash"` provider.BaseProvider `mapstructure:",squash"`
@ -66,7 +74,7 @@ func (p *Provider) updateConfig(configurationChan chan<- types.ConfigMessage, po
log.Info("Checking service fabric config") log.Info("Checking service fabric config")
} }
configuration, err := p.buildConfiguration(sfClient) configuration, err := p.getConfiguration(sfClient)
if err != nil { if err != nil {
return err return err
} }
@ -90,6 +98,15 @@ func (p *Provider) updateConfig(configurationChan chan<- types.ConfigMessage, po
return nil return nil
} }
func (p *Provider) getConfiguration(sfClient sfClient) (*types.Configuration, error) {
services, err := getClusterServices(sfClient)
if err != nil {
return nil, err
}
return p.buildConfiguration(services)
}
func getClusterServices(sfClient sfClient) ([]ServiceItemExtended, error) { func getClusterServices(sfClient sfClient) ([]ServiceItemExtended, error) {
apps, err := sfClient.GetApplications() apps, err := sfClient.GetApplications()
if err != nil { if err != nil {
@ -171,11 +188,6 @@ func getValidInstances(sfClient sfClient, app sf.ApplicationItem, service sf.Ser
return validInstances return validInstances
} }
func isPrimary(instance replicaInstance) bool {
_, data := instance.GetReplicaData()
return data.ReplicaRole == "Primary"
}
func isHealthy(instanceData *sf.ReplicaItemBase) bool { func isHealthy(instanceData *sf.ReplicaItemBase) bool {
return instanceData != nil && (instanceData.ReplicaStatus == "Ready" && instanceData.HealthState != "Error") return instanceData != nil && (instanceData.ReplicaStatus == "Ready" && instanceData.HealthState != "Error")
} }
@ -185,6 +197,54 @@ func hasHTTPEndpoint(instanceData *sf.ReplicaItemBase) bool {
return err == nil return err == nil
} }
func getReplicaDefaultEndpoint(replicaData *sf.ReplicaItemBase) (string, error) {
endpoints, err := decodeEndpointData(replicaData.Address)
if err != nil {
return "", err
}
var defaultHTTPEndpoint string
for _, v := range endpoints {
if strings.Contains(v, "http") {
defaultHTTPEndpoint = v
break
}
}
if len(defaultHTTPEndpoint) == 0 {
return "", errors.New("no default endpoint found")
}
return defaultHTTPEndpoint, nil
}
func decodeEndpointData(endpointData string) (map[string]string, error) {
var endpointsMap map[string]map[string]string
if endpointData == "" {
return nil, errors.New("endpoint data is empty")
}
err := json.Unmarshal([]byte(endpointData), &endpointsMap)
if err != nil {
return nil, err
}
endpoints, endpointsExist := endpointsMap["Endpoints"]
if !endpointsExist {
return nil, errors.New("endpoint doesn't exist in endpoint data")
}
return endpoints, nil
}
func isStateful(service ServiceItemExtended) bool {
return service.ServiceKind == kindStateful
}
func isStateless(service ServiceItemExtended) bool {
return service.ServiceKind == kindStateless
}
// Return a set of labels from the Extension and Property manager // Return a set of labels from the Extension and Property manager
// Allow Extension labels to disable importing labels from the property manager. // Allow Extension labels to disable importing labels from the property manager.
func getLabels(sfClient sfClient, service *sf.ServiceItem, app *sf.ApplicationItem) (map[string]string, error) { func getLabels(sfClient sfClient, service *sf.ServiceItem, app *sf.ApplicationItem) (map[string]string, error) {

View file

@ -1,9 +1,7 @@
package servicefabric package servicefabric
import ( import (
"encoding/json"
"errors" "errors"
"math"
"strings" "strings"
"text/template" "text/template"
@ -14,12 +12,7 @@ import (
sf "github.com/jjcollinge/servicefabric" sf "github.com/jjcollinge/servicefabric"
) )
func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration, error) { func (p *Provider) buildConfiguration(services []ServiceItemExtended) (*types.Configuration, error) {
services, err := getClusterServices(sfClient)
if err != nil {
return nil, err
}
var sfFuncMap = template.FuncMap{ var sfFuncMap = template.FuncMap{
// Services // Services
"getServices": getServices, "getServices": getServices,
@ -38,7 +31,7 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
"filterServicesByLabelValue": filterServicesByLabelValue, // FIXME unused "filterServicesByLabelValue": filterServicesByLabelValue, // FIXME unused
// Backend functions // Backend functions
"getWeight": getFuncServiceIntLabel(label.TraefikWeight, label.DefaultWeightInt), "getWeight": getFuncServiceIntLabel(label.TraefikWeight, label.DefaultWeight),
"getProtocol": getFuncServiceStringLabel(label.TraefikProtocol, label.DefaultProtocol), "getProtocol": getFuncServiceStringLabel(label.TraefikProtocol, label.DefaultProtocol),
"getMaxConn": getMaxConn, "getMaxConn": getMaxConn,
"getHealthCheck": getHealthCheck, "getHealthCheck": getHealthCheck,
@ -46,9 +39,9 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
"getLoadBalancer": getLoadBalancer, "getLoadBalancer": getLoadBalancer,
// Frontend Functions // Frontend Functions
"getPriority": getFuncServiceIntLabel(label.TraefikFrontendPriority, label.DefaultFrontendPriorityInt), "getPriority": getFuncServiceIntLabel(label.TraefikFrontendPriority, label.DefaultFrontendPriority),
"getPassHostHeader": getFuncServiceBoolLabel(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeaderBool), "getPassHostHeader": getFuncServiceBoolLabel(label.TraefikFrontendPassHostHeader, label.DefaultPassHostHeader),
"getPassTLSCert": getFuncBoolLabel(label.TraefikFrontendPassTLSCert, false), "getPassTLSCert": getFuncBoolLabel(label.TraefikFrontendPassTLSCert, label.DefaultPassTLSCert),
"getEntryPoints": getFuncServiceSliceStringLabel(label.TraefikFrontendEntryPoints), "getEntryPoints": getFuncServiceSliceStringLabel(label.TraefikFrontendEntryPoints),
"getBasicAuth": getFuncServiceSliceStringLabel(label.TraefikFrontendAuthBasic), "getBasicAuth": getFuncServiceSliceStringLabel(label.TraefikFrontendAuthBasic),
"getFrontendRules": getFuncServiceLabelWithPrefix(label.TraefikFrontendRule), "getFrontendRules": getFuncServiceLabelWithPrefix(label.TraefikFrontendRule),
@ -70,12 +63,9 @@ func (p *Provider) buildConfiguration(sfClient sfClient) (*types.Configuration,
return p.GetConfiguration(tmpl, sfFuncMap, templateObjects) return p.GetConfiguration(tmpl, sfFuncMap, templateObjects)
} }
func isStateful(service ServiceItemExtended) bool { func isPrimary(instance replicaInstance) bool {
return service.ServiceKind == "Stateful" _, data := instance.GetReplicaData()
} return data.ReplicaRole == "Primary"
func isStateless(service ServiceItemExtended) bool {
return service.ServiceKind == "Stateless"
} }
func getBackendName(service ServiceItemExtended, partition PartitionItemExtended) string { func getBackendName(service ServiceItemExtended, partition PartitionItemExtended) string {
@ -92,26 +82,6 @@ func getDefaultEndpoint(instance replicaInstance) string {
return endpoint return endpoint
} }
func getReplicaDefaultEndpoint(replicaData *sf.ReplicaItemBase) (string, error) {
endpoints, err := decodeEndpointData(replicaData.Address)
if err != nil {
return "", err
}
var defaultHTTPEndpoint string
for _, v := range endpoints {
if strings.Contains(v, "http") {
defaultHTTPEndpoint = v
break
}
}
if len(defaultHTTPEndpoint) == 0 {
return "", errors.New("no default endpoint found")
}
return defaultHTTPEndpoint, nil
}
func getNamedEndpoint(instance replicaInstance, endpointName string) string { func getNamedEndpoint(instance replicaInstance, endpointName string) string {
id, data := instance.GetReplicaData() id, data := instance.GetReplicaData()
endpoint, err := getReplicaNamedEndpoint(data, endpointName) endpoint, err := getReplicaNamedEndpoint(data, endpointName)
@ -175,170 +145,30 @@ func filterServicesByLabelValue(services []ServiceItemExtended, key, expectedVal
return srvWithLabel return srvWithLabel
} }
func decodeEndpointData(endpointData string) (map[string]string, error) {
var endpointsMap map[string]map[string]string
if endpointData == "" {
return nil, errors.New("endpoint data is empty")
}
err := json.Unmarshal([]byte(endpointData), &endpointsMap)
if err != nil {
return nil, err
}
endpoints, endpointsExist := endpointsMap["Endpoints"]
if !endpointsExist {
return nil, errors.New("endpoint doesn't exist in endpoint data")
}
return endpoints, nil
}
func getHeaders(service ServiceItemExtended) *types.Headers { func getHeaders(service ServiceItemExtended) *types.Headers {
headers := &types.Headers{ return label.GetHeaders(service.Labels)
CustomRequestHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendRequestHeaders),
CustomResponseHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendResponseHeaders),
SSLProxyHeaders: label.GetMapValue(service.Labels, label.TraefikFrontendSSLProxyHeaders),
AllowedHosts: label.GetSliceStringValue(service.Labels, label.TraefikFrontendAllowedHosts),
HostsProxyHeaders: label.GetSliceStringValue(service.Labels, label.TraefikFrontendHostsProxyHeaders),
STSSeconds: label.GetInt64Value(service.Labels, label.TraefikFrontendSTSSeconds, 0),
SSLRedirect: label.GetBoolValue(service.Labels, label.TraefikFrontendSSLRedirect, false),
SSLTemporaryRedirect: label.GetBoolValue(service.Labels, label.TraefikFrontendSSLTemporaryRedirect, false),
STSIncludeSubdomains: label.GetBoolValue(service.Labels, label.TraefikFrontendSTSIncludeSubdomains, false),
STSPreload: label.GetBoolValue(service.Labels, label.TraefikFrontendSTSPreload, false),
ForceSTSHeader: label.GetBoolValue(service.Labels, label.TraefikFrontendForceSTSHeader, false),
FrameDeny: label.GetBoolValue(service.Labels, label.TraefikFrontendFrameDeny, false),
ContentTypeNosniff: label.GetBoolValue(service.Labels, label.TraefikFrontendContentTypeNosniff, false),
BrowserXSSFilter: label.GetBoolValue(service.Labels, label.TraefikFrontendBrowserXSSFilter, false),
IsDevelopment: label.GetBoolValue(service.Labels, label.TraefikFrontendIsDevelopment, false),
SSLHost: label.GetStringValue(service.Labels, label.TraefikFrontendSSLHost, ""),
CustomFrameOptionsValue: label.GetStringValue(service.Labels, label.TraefikFrontendCustomFrameOptionsValue, ""),
ContentSecurityPolicy: label.GetStringValue(service.Labels, label.TraefikFrontendContentSecurityPolicy, ""),
PublicKey: label.GetStringValue(service.Labels, label.TraefikFrontendPublicKey, ""),
ReferrerPolicy: label.GetStringValue(service.Labels, label.TraefikFrontendReferrerPolicy, ""),
CustomBrowserXSSValue: label.GetStringValue(service.Labels, label.TraefikFrontendCustomBrowserXSSValue, ""),
}
if !headers.HasSecureHeadersDefined() && !headers.HasCustomHeadersDefined() {
return nil
}
return headers
} }
func getWhiteList(service ServiceItemExtended) *types.WhiteList { func getWhiteList(service ServiceItemExtended) *types.WhiteList {
if label.Has(service.Labels, label.TraefikFrontendWhitelistSourceRange) { return label.GetWhiteList(service.Labels)
log.Warnf("Deprecated configuration found: %s. Please use %s.", label.TraefikFrontendWhitelistSourceRange, label.TraefikFrontendWhiteListSourceRange)
}
ranges := label.GetSliceStringValue(service.Labels, label.TraefikFrontendWhiteListSourceRange)
if len(ranges) > 0 {
return &types.WhiteList{
SourceRange: ranges,
UseXForwardedFor: label.GetBoolValue(service.Labels, label.TraefikFrontendWhiteListUseXForwardedFor, false),
}
}
// TODO: Deprecated
values := label.GetSliceStringValue(service.Labels, label.TraefikFrontendWhitelistSourceRange)
if len(values) > 0 {
return &types.WhiteList{
SourceRange: values,
UseXForwardedFor: false,
}
}
return nil
} }
func getRedirect(service ServiceItemExtended) *types.Redirect { func getRedirect(service ServiceItemExtended) *types.Redirect {
permanent := label.GetBoolValue(service.Labels, label.TraefikFrontendRedirectPermanent, false) return label.GetRedirect(service.Labels)
if label.Has(service.Labels, label.TraefikFrontendRedirectEntryPoint) {
return &types.Redirect{
EntryPoint: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectEntryPoint, ""),
Permanent: permanent,
}
}
if label.Has(service.Labels, label.TraefikFrontendRedirectRegex) &&
label.Has(service.Labels, label.TraefikFrontendRedirectReplacement) {
return &types.Redirect{
Regex: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectRegex, ""),
Replacement: label.GetStringValue(service.Labels, label.TraefikFrontendRedirectReplacement, ""),
Permanent: permanent,
}
}
return nil
} }
func getMaxConn(service ServiceItemExtended) *types.MaxConn { func getMaxConn(service ServiceItemExtended) *types.MaxConn {
amount := label.GetInt64Value(service.Labels, label.TraefikBackendMaxConnAmount, math.MinInt64) return label.GetMaxConn(service.Labels)
extractorFunc := label.GetStringValue(service.Labels, label.TraefikBackendMaxConnExtractorFunc, label.DefaultBackendMaxconnExtractorFunc)
if amount == math.MinInt64 || len(extractorFunc) == 0 {
return nil
}
return &types.MaxConn{
Amount: amount,
ExtractorFunc: extractorFunc,
}
} }
func getHealthCheck(service ServiceItemExtended) *types.HealthCheck { func getHealthCheck(service ServiceItemExtended) *types.HealthCheck {
path := label.GetStringValue(service.Labels, label.TraefikBackendHealthCheckPath, "") return label.GetHealthCheck(service.Labels)
if len(path) == 0 {
return nil
}
port := label.GetIntValue(service.Labels, label.TraefikBackendHealthCheckPort, label.DefaultBackendHealthCheckPort)
interval := label.GetStringValue(service.Labels, label.TraefikBackendHealthCheckInterval, "")
return &types.HealthCheck{
Path: path,
Port: port,
Interval: interval,
}
} }
func getCircuitBreaker(service ServiceItemExtended) *types.CircuitBreaker { func getCircuitBreaker(service ServiceItemExtended) *types.CircuitBreaker {
circuitBreaker := label.GetStringValue(service.Labels, label.TraefikBackendCircuitBreakerExpression, "") return label.GetCircuitBreaker(service.Labels)
if len(circuitBreaker) == 0 {
return nil
}
return &types.CircuitBreaker{Expression: circuitBreaker}
} }
func getLoadBalancer(service ServiceItemExtended) *types.LoadBalancer { func getLoadBalancer(service ServiceItemExtended) *types.LoadBalancer {
if !label.HasPrefix(service.Labels, label.TraefikBackendLoadBalancer) { return label.GetLoadBalancer(service.Labels)
return nil
}
method := label.GetStringValue(service.Labels, label.TraefikBackendLoadBalancerMethod, label.DefaultBackendLoadBalancerMethod)
lb := &types.LoadBalancer{
Method: method,
Sticky: getSticky(service),
}
if label.GetBoolValue(service.Labels, label.TraefikBackendLoadBalancerStickiness, false) {
cookieName := label.GetStringValue(service.Labels, label.TraefikBackendLoadBalancerStickinessCookieName, label.DefaultBackendLoadbalancerStickinessCookieName)
lb.Stickiness = &types.Stickiness{CookieName: cookieName}
}
return lb
}
// TODO: Deprecated
// replaced by Stickiness
// Deprecated
func getSticky(service ServiceItemExtended) bool {
if label.Has(service.Labels, label.TraefikBackendLoadBalancerSticky) {
log.Warnf("Deprecated configuration found: %s. Please use %s.", label.TraefikBackendLoadBalancerSticky, label.TraefikBackendLoadBalancerStickiness)
}
return label.GetBoolValue(service.Labels, label.TraefikBackendLoadBalancerSticky, false)
} }

View file

@ -59,7 +59,7 @@ const tmpl = `
{{range $instance := $partition.Instances}} {{range $instance := $partition.Instances}}
[backends."{{ $service.Name }}".servers."{{ $instance.ID }}"] [backends."{{ $service.Name }}".servers."{{ $instance.ID }}"]
url = "{{ getDefaultEndpoint $instance }}" url = "{{ getDefaultEndpoint $instance }}"
weight = {{ getLabelValue $service "backend.weight" "1" }} weight = {{ getWeight $service }}
{{end}} {{end}}
{{else if isStateful $service}} {{else if isStateful $service}}
@ -199,17 +199,18 @@ const tmpl = `
rule = "{{ $value }}" rule = "{{ $value }}"
{{end}} {{end}}
{{else if isStateful $service}} {{else if isStateful $service }}
{{range $partition := $service.Partitions }} {{range $partition := $service.Partitions }}
{{ $partitionId := $partition.PartitionInformation.ID }} {{ $partitionId := $partition.PartitionInformation.ID }}
{{if hasLabel $service "frontend.rule" }} {{ $rule := getLabelValue $service (print "traefik.frontend.rule.partition." $partitionId) "" }}
[frontends."{{ $service.Name }}/{{ $partitionId }}"] {{if $rule }}
backend = "{{ getBackendName $service.Name $partition }}" [frontends."{{ $service.Name }}/{{ $partitionId }}"]
backend = "{{ getBackendName $service $partition }}"
[frontends."{{ $service.Name }}/{{ $partitionId }}".routes.default] [frontends."{{ $service.Name }}/{{ $partitionId }}".routes.default]
rule = {{ getLabelValue $service "frontend.rule.partition.$partitionId" "" }} rule = "{{ $rule }}"
{{end}} {{end}}
{{end}} {{end}}

View file

@ -35,14 +35,15 @@
// idea to log simple values without formatting them. This practice allows // idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way. // the chosen logger to encode values in the most appropriate way.
// //
// Log Context // Contextual Loggers
// //
// A log context stores keyvals that it includes in all log events. Building // A contextual logger stores keyvals that it includes in all log events.
// appropriate log contexts reduces repetition and aids consistency in the // Building appropriate contextual loggers reduces repetition and aids
// resulting log output. We can use a context to improve the RunTask example. // consistency in the resulting log output. With and WithPrefix add context to
// a logger. We can use With to improve the RunTask example.
// //
// func RunTask(task Task, logger log.Logger) string { // func RunTask(task Task, logger log.Logger) string {
// logger = log.NewContext(logger).With("taskID", task.ID) // logger = log.With(logger, "taskID", task.ID)
// logger.Log("event", "starting task") // logger.Log("event", "starting task")
// ... // ...
// taskHelper(task.Cmd, logger) // taskHelper(task.Cmd, logger)
@ -51,19 +52,18 @@
// } // }
// //
// The improved version emits the same log events as the original for the // The improved version emits the same log events as the original for the
// first and last calls to Log. The call to taskHelper highlights that a // first and last calls to Log. Passing the contextual logger to taskHelper
// context may be passed as a logger to other functions. Each log event // enables each log event created by taskHelper to include the task.ID even
// created by the called function will include the task.ID even though the // though taskHelper does not have access to that value. Using contextual
// function does not have access to that value. Using log contexts this way // loggers this way simplifies producing log output that enables tracing the
// simplifies producing log output that enables tracing the life cycle of // life cycle of individual tasks. (See the Contextual example for the full
// individual tasks. (See the Context example for the full code of the // code of the above snippet.)
// above snippet.)
// //
// Dynamic Context Values // Dynamic Contextual Values
// //
// A Valuer function stored in a log context generates a new value each time // A Valuer function stored in a contextual logger generates a new value each
// the context logs an event. The Valuer example demonstrates how this // time an event is logged. The Valuer example demonstrates how this feature
// feature works. // works.
// //
// Valuers provide the basis for consistently logging timestamps and source // Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose. // code location. The log package defines several valuers for that purpose.
@ -72,7 +72,7 @@
// entries contain a timestamp and source location looks like this: // entries contain a timestamp and source location looks like this:
// //
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) // logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) // logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
// //
// Concurrent Safety // Concurrent Safety
// //
@ -90,4 +90,27 @@
// handled atomically within the wrapped logger, but it typically serializes // handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting // both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event. // logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
// if err := fmtlogger.Log(keyvals...); err != nil {
// panic(err)
// }
// return nil
// })
package log package log

View file

@ -44,9 +44,6 @@ func merge(dst map[string]interface{}, k, v interface{}) {
default: default:
key = fmt.Sprint(x) key = fmt.Sprint(x)
} }
if x, ok := v.(error); ok {
v = safeError(x)
}
// We want json.Marshaler and encoding.TextMarshaller to take priority over // We want json.Marshaler and encoding.TextMarshaller to take priority over
// err.Error() and v.String(). But json.Marshall (called later) does that by // err.Error() and v.String(). But json.Marshall (called later) does that by

View file

@ -6,7 +6,7 @@ import "errors"
// log event from keyvals, a variadic sequence of alternating keys and values. // log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In // Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or // particular, any implementation of Logger that appends to keyvals or
// modifies any of its elements must make a copy first. // modifies or retains any of its elements must make a copy first.
type Logger interface { type Logger interface {
Log(keyvals ...interface{}) error Log(keyvals ...interface{}) error
} }
@ -15,62 +15,100 @@ type Logger interface {
// the missing value. // the missing value.
var ErrMissingValue = errors.New("(MISSING)") var ErrMissingValue = errors.New("(MISSING)")
// NewContext returns a new Context that logs to logger. // With returns a new contextual logger with keyvals prepended to those passed
func NewContext(logger Logger) *Context { // to calls to Log. If logger is also a contextual logger created by With or
if c, ok := logger.(*Context); ok { // WithPrefix, keyvals is appended to the existing context.
return c //
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
} }
return &Context{logger: logger}
} }
// Context must always have the same number of stack frames between calls to // WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With or WithPrefix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// context is the Logger implementation returned by With and WithPrefix. It
// wraps a Logger and holds keyvals that it includes in all log events. Its
// Log method calls bindValues to generate values for each Valuer in the
// context keyvals.
//
// A context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This // its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to // requirement comes from the functional requirement to allow a context to
// resolve application call site information for a log.Caller stored in the // resolve application call site information for a Caller stored in the
// context. To do this we must be able to predict the number of logging // context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called. // functions on the stack when bindValues is called.
// //
// Three implementation details provide the needed stack depth consistency. // Two implementation details provide the needed stack depth consistency.
// The first two of these details also result in better amortized performance,
// and thus make sense even without the requirements regarding stack depth.
// The third detail, however, is subtle and tied to the implementation of the
// Go compiler.
// //
// 1. NewContext avoids introducing an additional layer when asked to // 1. newContext avoids introducing an additional layer when asked to
// wrap another Context. // wrap another context.
// 2. With avoids introducing an additional layer by returning a newly // 2. With and WithPrefix avoid introducing an additional layer by
// constructed Context with a merged keyvals rather than simply // returning a newly constructed context with a merged keyvals rather
// wrapping the existing Context. // than simply wrapping the existing context.
// 3. All of Context's methods take pointer receivers even though they type context struct {
// do not mutate the Context.
//
// Before explaining the last detail, first some background. The Go compiler
// generates wrapper methods to implement the auto dereferencing behavior when
// calling a value method through a pointer variable. These wrapper methods
// are also used when calling a value method through an interface variable
// because interfaces store a pointer to the underlying concrete value.
// Calling a pointer receiver through an interface does not require generating
// an additional function.
//
// If Context had value methods then calling Context.Log through a variable
// with type Logger would have an extra stack frame compared to calling
// Context.Log through a variable with type Context. Using pointer receivers
// avoids this problem.
// A Context wraps a Logger and holds keyvals that it includes in all log
// events. When logging, a Context replaces all value elements (odd indexes)
// containing a Valuer with their generated value for each call to its Log
// method.
type Context struct {
logger Logger logger Logger
keyvals []interface{} keyvals []interface{}
hasValuer bool hasValuer bool
} }
func newContext(logger Logger) *context {
if c, ok := logger.(*context); ok {
return c
}
return &context{logger: logger}
}
// Log replaces all value elements (odd indexes) containing a Valuer in the // Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the // stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger. // result to the wrapped Logger.
func (l *Context) Log(keyvals ...interface{}) error { func (l *context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...) kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 { if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue) kvs = append(kvs, ErrMissingValue)
@ -86,53 +124,6 @@ func (l *Context) Log(keyvals ...interface{}) error {
return l.logger.Log(kvs...) return l.logger.Log(kvs...)
} }
// With returns a new Context with keyvals appended to those of the receiver.
func (l *Context) With(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &Context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new Context with keyvals prepended to those of the
// receiver.
func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &Context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If // LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger // f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f. // object that calls f.

View file

@ -39,7 +39,7 @@ func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key } return func(a *StdlibAdapter) { a.timestampKey = key }
} }
// FileKey sets the key for the file and line field. By default, it's "file". // FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption { func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key } return func(a *StdlibAdapter) { a.fileKey = key }
} }
@ -55,7 +55,7 @@ func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{ a := StdlibAdapter{
Logger: logger, Logger: logger,
timestampKey: "ts", timestampKey: "ts",
fileKey: "file", fileKey: "caller",
messageKey: "msg", messageKey: "msg",
} }
for _, option := range options { for _, option := range options {

View file

@ -36,24 +36,59 @@ func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger}) l.logger.Store(loggerStruct{logger})
} }
// SyncWriter synchronizes concurrent writes to an io.Writer. // NewSyncWriter returns a new writer that is safe for concurrent use by
type SyncWriter struct { // multiple goroutines. Writes to the returned writer are passed on to w. If
mu sync.Mutex // another write is already in progress, the calling goroutine blocks until
w io.Writer // the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
// interface {
// Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
switch w := w.(type) {
case fdWriter:
return &fdSyncWriter{fdWriter: w}
default:
return &syncWriter{Writer: w}
}
} }
// NewSyncWriter returns a new SyncWriter. The returned writer is safe for // syncWriter synchronizes concurrent writes to an io.Writer.
// concurrent use by multiple goroutines. type syncWriter struct {
func NewSyncWriter(w io.Writer) *SyncWriter { sync.Mutex
return &SyncWriter{w: w} io.Writer
} }
// Write writes p to the underlying io.Writer. If another write is already in // Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the SyncWriter is available. // progress, the calling goroutine blocks until the syncWriter is available.
func (w *SyncWriter) Write(p []byte) (n int, err error) { func (w *syncWriter) Write(p []byte) (n int, err error) {
w.mu.Lock() w.Lock()
n, err = w.w.Write(p) n, err = w.Writer.Write(p)
w.mu.Unlock() w.Unlock()
return n, err
}
// fdWriter is an io.Writer that also has an Fd method. The most common
// example of an fdWriter is an *os.File.
type fdWriter interface {
io.Writer
Fd() uintptr
}
// fdSyncWriter synchronizes concurrent writes to an fdWriter.
type fdSyncWriter struct {
sync.Mutex
fdWriter
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the fdSyncWriter is available.
func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.fdWriter.Write(p)
w.Unlock()
return n, err return n, err
} }

View file

@ -6,9 +6,9 @@ import (
"github.com/go-stack/stack" "github.com/go-stack/stack"
) )
// A Valuer generates a log value. When passed to Context.With in a value // A Valuer generates a log value. When passed to With or WithPrefix in a
// element (odd indexes), it represents a dynamic value which is re-evaluated // value element (odd indexes), it represents a dynamic value which is re-
// with each log event. // evaluated with each log event.
type Valuer func() interface{} type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer // bindValues replaces all value elements (odd indexes) containing a Valuer
@ -32,22 +32,51 @@ func containsValuer(keyvals []interface{}) bool {
return false return false
} }
// Timestamp returns a Valuer that invokes the underlying function when bound, // Timestamp returns a timestamp Valuer. It invokes the t function to get the
// returning a time.Time. Users will probably want to use DefaultTimestamp or // time; unless you are doing something tricky, pass time.Now.
// DefaultTimestampUTC. //
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer { func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() } return func() interface{} { return t() }
} }
var ( // TimestampFormat returns a timestamp Valuer with a custom time format. It
// DefaultTimestamp is a Valuer that returns the current wallclock time, // invokes the t function to get the time to format; unless you are doing
// respecting time zones, when bound. // something tricky, pass time.Now. The layout string is passed to
DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) } // Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
return func() interface{} {
return timeFormat{
time: t(),
layout: layout,
}
}
}
// DefaultTimestampUTC is a Valuer that returns the current time in UTC // A timeFormat represents an instant in time and a layout used when
// when bound. // marshaling to a text format.
DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) } type timeFormat struct {
) time time.Time
layout string
}
func (tf timeFormat) String() string {
return tf.time.Format(tf.layout)
}
// MarshalText implements encoding.TextMarshaller.
func (tf timeFormat) MarshalText() (text []byte, err error) {
// The following code adapted from the standard library time.Time.Format
// method. Using the same undocumented magic constant to extend the size
// of the buffer as seen there.
b := make([]byte, 0, len(tf.layout)+10)
b = tf.time.AppendFormat(b, tf.layout)
return b, nil
}
// Caller returns a Valuer that returns a file and line from a specified depth // Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller. // in the callstack. Users will probably want to use DefaultCaller.
@ -56,6 +85,17 @@ func Caller(depth int) Valuer {
} }
var ( var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC = TimestampFormat(
func() time.Time { return time.Now().UTC() },
time.RFC3339Nano,
)
// DefaultCaller is a Valuer that returns the file and line where the Log // DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With. // method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3) DefaultCaller = Caller(3)

View file

@ -1,23 +1,54 @@
// Package metrics provides a framework for application instrumentation. All // Package metrics provides a framework for application instrumentation. It's
// metrics are safe for concurrent use. Considerable design influence has been // primarily designed to help you get started with good and robust
// taken from https://github.com/codahale/metrics and https://prometheus.io. // instrumentation, and to help you migrate from a less-capable system like
// Graphite to a more-capable system like Prometheus. If your organization has
// already standardized on an instrumentation system like Prometheus, and has no
// plans to change, it may make sense to use that system's instrumentation
// library directly.
// //
// This package contains the common interfaces. Your code should take these // This package provides three core metric abstractions (Counter, Gauge, and
// interfaces as parameters. Implementations are provided for different // Histogram) and implementations for almost all common instrumentation
// instrumentation systems in the various subdirectories. // backends. Each metric has an observation method (Add, Set, or Observe,
// respectively) used to record values, and a With method to "scope" the
// observation by various parameters. For example, you might have a Histogram to
// record request durations, parameterized by the method that's being called.
//
// var requestDuration metrics.Histogram
// // ...
// requestDuration.With("method", "MyMethod").Observe(time.Since(begin))
//
// This allows a single high-level metrics object (requestDuration) to work with
// many code paths somewhat dynamically. The concept of With is fully supported
// in some backends like Prometheus, and not supported in other backends like
// Graphite. So, With may be a no-op, depending on the concrete implementation
// you choose. Please check the implementation to know for sure. For
// implementations that don't provide With, it's necessary to fully parameterize
// each metric in the metric name, e.g.
//
// // Statsd
// c := statsd.NewCounter("request_duration_MyMethod_200")
// c.Add(1)
//
// // Prometheus
// c := prometheus.NewCounter(stdprometheus.CounterOpts{
// Name: "request_duration",
// ...
// }, []string{"method", "status_code"})
// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1)
// //
// Usage // Usage
// //
// Metrics are dependencies and should be passed to the components that need // Metrics are dependencies, and should be passed to the components that need
// them in the same way you'd construct and pass a database handle, or reference // them in the same way you'd construct and pass a database handle, or reference
// to another component. So, create metrics in your func main, using whichever // to another component. Metrics should *not* be created in the global scope.
// concrete implementation is appropriate for your organization. // Instead, instantiate metrics in your func main, using whichever concrete
// implementation is appropriate for your organization.
// //
// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ // latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
// Namespace: "myteam", // Namespace: "myteam",
// Subsystem: "foosvc", // Subsystem: "foosvc",
// Name: "request_latency_seconds", // Name: "request_latency_seconds",
// Help: "Incoming request latency in seconds." // Help: "Incoming request latency in seconds.",
// }, []string{"method", "status_code"}) // }, []string{"method", "status_code"})
// //
// Write your components to take the metrics they will use as parameters to // Write your components to take the metrics they will use as parameters to
@ -40,8 +71,14 @@
// api := NewAPI(store, logger, latency) // api := NewAPI(store, logger, latency)
// http.ListenAndServe("/", api) // http.ListenAndServe("/", api)
// //
// Note that metrics are "write-only" interfaces.
//
// Implementation details // Implementation details
// //
// All metrics are safe for concurrent use. Considerable design influence has
// been taken from https://github.com/codahale/metrics and
// https://prometheus.io.
//
// Each telemetry system has different semantics for label values, push vs. // Each telemetry system has different semantics for label values, push vs.
// pull, support for histograms, etc. These properties influence the design of // pull, support for histograms, etc. These properties influence the design of
// their respective packages. This table attempts to summarize the key points of // their respective packages. This table attempts to summarize the key points of
@ -54,7 +91,7 @@
// expvar 1 atomic atomic synthetic, batch, in-place expose // expvar 1 atomic atomic synthetic, batch, in-place expose
// influx n custom custom custom // influx n custom custom custom
// prometheus n native native native // prometheus n native native native
// circonus 1 native native native
// pcp 1 native native native // pcp 1 native native native
// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate
// //
package metrics package metrics

View file

@ -14,10 +14,13 @@ import (
"fmt" "fmt"
"io" "io"
"strings" "strings"
"sync"
"sync/atomic"
"time" "time"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/generic"
"github.com/go-kit/kit/metrics/internal/lv" "github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap" "github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn" "github.com/go-kit/kit/util/conn"
@ -34,53 +37,63 @@ import (
// To regularly report metrics to an io.Writer, use the WriteLoop helper method. // To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a DogStatsD server, use the SendLoop helper method. // To send to a DogStatsD server, use the SendLoop helper method.
type Dogstatsd struct { type Dogstatsd struct {
mtx sync.RWMutex
prefix string prefix string
rates *ratemap.RateMap rates *ratemap.RateMap
counters *lv.Space counters *lv.Space
gauges *lv.Space gauges map[string]*gaugeNode
timings *lv.Space timings *lv.Space
histograms *lv.Space histograms *lv.Space
logger log.Logger logger log.Logger
lvs lv.LabelValues
} }
// New returns a Dogstatsd object that may be used to create metrics. Prefix is // New returns a Dogstatsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to // applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods. // WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Dogstatsd { func New(prefix string, logger log.Logger, lvs ...string) *Dogstatsd {
if len(lvs)%2 != 0 {
panic("odd number of LabelValues; programmer error!")
}
return &Dogstatsd{ return &Dogstatsd{
prefix: prefix, prefix: prefix,
rates: ratemap.New(), rates: ratemap.New(),
counters: lv.NewSpace(), counters: lv.NewSpace(),
gauges: lv.NewSpace(), gauges: map[string]*gaugeNode{},
timings: lv.NewSpace(), timings: lv.NewSpace(),
histograms: lv.NewSpace(), histograms: lv.NewSpace(),
logger: logger, logger: logger,
lvs: lvs,
} }
} }
// NewCounter returns a counter, sending observations to this Dogstatsd object. // NewCounter returns a counter, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter { func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Counter{ return &Counter{
name: d.prefix + name, name: name,
obs: d.counters.Observe, obs: d.counters.Observe,
} }
} }
// NewGauge returns a gauge, sending observations to this Dogstatsd object. // NewGauge returns a gauge, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewGauge(name string) *Gauge { func (d *Dogstatsd) NewGauge(name string) *Gauge {
return &Gauge{ d.mtx.Lock()
name: d.prefix + name, n, ok := d.gauges[name]
obs: d.gauges.Observe, if !ok {
n = &gaugeNode{gauge: &Gauge{g: generic.NewGauge(name), ddog: d}}
d.gauges[name] = n
} }
d.mtx.Unlock()
return n.gauge
} }
// NewTiming returns a histogram whose observations are interpreted as // NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Dogstatsd object. // millisecond durations, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing { func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Timing{ return &Timing{
name: d.prefix + name, name: name,
obs: d.timings.Observe, obs: d.timings.Observe,
} }
} }
@ -88,9 +101,9 @@ func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
// NewHistogram returns a histogram whose observations are of an unspecified // NewHistogram returns a histogram whose observations are of an unspecified
// unit, and are forwarded to this Dogstatsd object. // unit, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram { func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Histogram{ return &Histogram{
name: d.prefix + name, name: name,
obs: d.histograms.Observe, obs: d.histograms.Observe,
} }
} }
@ -124,7 +137,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
var n int var n int
d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|c%s%s\n", d.prefix, name, sum(values), sampling(d.rates.Get(name)), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -135,22 +148,23 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
return count, err return count, err
} }
d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.mtx.RLock()
n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs)) for _, root := range d.gauges {
if err != nil { root.walk(func(name string, lvs lv.LabelValues, value float64) bool {
return false n, err = fmt.Fprintf(w, "%s%s:%f|g%s\n", d.prefix, name, value, d.tagValues(lvs))
} if err != nil {
count += int64(n) return false
return true }
}) count += int64(n)
if err != nil { return true
return count, err })
} }
d.mtx.RUnlock()
d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name) sampleRate := d.rates.Get(name)
for _, value := range values { for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|ms%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -165,7 +179,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name) sampleRate := d.rates.Get(name)
for _, value := range values { for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|h%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -200,14 +214,17 @@ func sampling(r float64) string {
return sv return sv
} }
func tagValues(labelValues []string) string { func (d *Dogstatsd) tagValues(labelValues []string) string {
if len(labelValues) == 0 { if len(labelValues) == 0 && len(d.lvs) == 0 {
return "" return ""
} }
if len(labelValues)%2 != 0 { if len(labelValues)%2 != 0 {
panic("tagValues received a labelValues with an odd number of strings") panic("tagValues received a labelValues with an odd number of strings")
} }
pairs := make([]string, 0, len(labelValues)/2) pairs := make([]string, 0, (len(d.lvs)+len(labelValues))/2)
for i := 0; i < len(d.lvs); i += 2 {
pairs = append(pairs, d.lvs[i]+":"+d.lvs[i+1])
}
for i := 0; i < len(labelValues); i += 2 { for i := 0; i < len(labelValues); i += 2 {
pairs = append(pairs, labelValues[i]+":"+labelValues[i+1]) pairs = append(pairs, labelValues[i]+":"+labelValues[i+1])
} }
@ -241,23 +258,31 @@ func (c *Counter) Add(delta float64) {
// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd // Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd
// object, and aggregated (the last observation selected) per timeseries. // object, and aggregated (the last observation selected) per timeseries.
type Gauge struct { type Gauge struct {
name string g *generic.Gauge
lvs lv.LabelValues ddog *Dogstatsd
obs observeFunc set int32
} }
// With implements metrics.Gauge. // With implements metrics.Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge { func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{ g.ddog.mtx.RLock()
name: g.name, node := g.ddog.gauges[g.g.Name]
lvs: g.lvs.With(labelValues...), g.ddog.mtx.RUnlock()
obs: g.obs,
} ga := &Gauge{g: g.g.With(labelValues...).(*generic.Gauge), ddog: g.ddog}
return node.addGauge(ga, ga.g.LabelValues())
} }
// Set implements metrics.Gauge. // Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) { func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value) g.g.Set(value)
g.touch()
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.g.Add(delta)
g.touch()
} }
// Timing is a DogStatsD timing, or metrics.Histogram. Observations are // Timing is a DogStatsD timing, or metrics.Histogram. Observations are
@ -304,3 +329,61 @@ func (h *Histogram) With(labelValues ...string) metrics.Histogram {
func (h *Histogram) Observe(value float64) { func (h *Histogram) Observe(value float64) {
h.obs(h.name, h.lvs, value) h.obs(h.name, h.lvs, value)
} }
// pair is a single label/value tag, used as a map key in the gauge trie.
type pair struct{ label, value string }

// gaugeNode is one level of a trie keyed by successive label/value pairs.
// A node may hold the Gauge for the label set that terminates at this
// level, plus children for longer label sets. mtx guards both fields.
type gaugeNode struct {
	mtx      sync.RWMutex
	gauge    *Gauge
	children map[pair]*gaugeNode
}
// addGauge registers g under the remaining label/value pairs lvs,
// descending the trie one pair per level. If a Gauge was already
// registered for the exact same label set, that existing Gauge is
// returned instead, so all callers share a single timeseries.
func (n *gaugeNode) addGauge(g *Gauge, lvs lv.LabelValues) *Gauge {
	n.mtx.Lock()
	defer n.mtx.Unlock()
	if len(lvs) == 0 {
		// Terminal node for this label set: keep the first Gauge seen.
		if n.gauge == nil {
			n.gauge = g
		}
		return n.gauge
	}
	if len(lvs) < 2 {
		// Labels must come in name/value pairs.
		panic("too few LabelValues; programmer error!")
	}
	head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
	if n.children == nil {
		n.children = map[pair]*gaugeNode{}
	}
	child, ok := n.children[head]
	if !ok {
		child = &gaugeNode{}
		n.children[head] = child
	}
	return child.addGauge(g, tail)
}
// walk visits every gauge in the subtree, invoking fn with the gauge's
// name, label values, and current value. Gauges not touched since the
// previous walk are skipped (read reports ok == false). fn returning
// false aborts the traversal; walk then also returns false.
func (n *gaugeNode) walk(fn func(string, lv.LabelValues, float64) bool) bool {
	n.mtx.RLock()
	defer n.mtx.RUnlock()
	if n.gauge != nil {
		value, ok := n.gauge.read()
		if ok && !fn(n.gauge.g.Name, n.gauge.g.LabelValues(), value) {
			return false
		}
	}
	for _, child := range n.children {
		if !child.walk(fn) {
			return false
		}
	}
	return true
}
// touch marks the gauge as modified, so the next walk/WriteTo emits it.
func (g *Gauge) touch() {
	atomic.StoreInt32(&(g.set), 1)
}
// read returns the gauge's current value and whether the gauge was
// touched since the previous read; the touched flag is cleared
// atomically as part of the read.
func (g *Gauge) read() (float64, bool) {
	set := atomic.SwapInt32(&(g.set), 0)
	return g.g.Value(), set != 0
}

View file

@ -33,6 +33,7 @@ func NewCounter(name string) *Counter {
// With implements Counter. // With implements Counter.
func (c *Counter) With(labelValues ...string) metrics.Counter { func (c *Counter) With(labelValues ...string) metrics.Counter {
return &Counter{ return &Counter{
Name: c.Name,
bits: atomic.LoadUint64(&c.bits), bits: atomic.LoadUint64(&c.bits),
lvs: c.lvs.With(labelValues...), lvs: c.lvs.With(labelValues...),
} }
@ -95,6 +96,7 @@ func NewGauge(name string) *Gauge {
// With implements Gauge. // With implements Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge { func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{ return &Gauge{
Name: g.Name,
bits: atomic.LoadUint64(&g.bits), bits: atomic.LoadUint64(&g.bits),
lvs: g.lvs.With(labelValues...), lvs: g.lvs.With(labelValues...),
} }
@ -105,6 +107,20 @@ func (g *Gauge) Set(value float64) {
atomic.StoreUint64(&g.bits, math.Float64bits(value)) atomic.StoreUint64(&g.bits, math.Float64bits(value))
} }
// Add implements metrics.Gauge. The delta is folded into the stored
// float64 bits via a compare-and-swap loop, so concurrent adds are
// never lost.
func (g *Gauge) Add(delta float64) {
	for {
		oldBits := atomic.LoadUint64(&g.bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
		if atomic.CompareAndSwapUint64(&g.bits, oldBits, newBits) {
			return
		}
	}
}
// Value returns the current value of the gauge. // Value returns the current value of the gauge.
func (g *Gauge) Value() float64 { func (g *Gauge) Value() float64 {
return math.Float64frombits(atomic.LoadUint64(&g.bits)) return math.Float64frombits(atomic.LoadUint64(&g.bits))
@ -121,7 +137,7 @@ func (g *Gauge) LabelValues() []string {
type Histogram struct { type Histogram struct {
Name string Name string
lvs lv.LabelValues lvs lv.LabelValues
h gohistogram.Histogram h *safeHistogram
} }
// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A // NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A
@ -129,25 +145,30 @@ type Histogram struct {
func NewHistogram(name string, buckets int) *Histogram { func NewHistogram(name string, buckets int) *Histogram {
return &Histogram{ return &Histogram{
Name: name, Name: name,
h: gohistogram.NewHistogram(buckets), h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)},
} }
} }
// With implements Histogram. // With implements Histogram.
func (h *Histogram) With(labelValues ...string) metrics.Histogram { func (h *Histogram) With(labelValues ...string) metrics.Histogram {
return &Histogram{ return &Histogram{
lvs: h.lvs.With(labelValues...), Name: h.Name,
h: h.h, lvs: h.lvs.With(labelValues...),
h: h.h,
} }
} }
// Observe implements Histogram. // Observe implements Histogram.
func (h *Histogram) Observe(value float64) { func (h *Histogram) Observe(value float64) {
h.h.Lock()
defer h.h.Unlock()
h.h.Add(value) h.h.Add(value)
} }
// Quantile returns the value of the quantile q, 0.0 < q < 1.0. // Quantile returns the value of the quantile q, 0.0 < q < 1.0.
func (h *Histogram) Quantile(q float64) float64 { func (h *Histogram) Quantile(q float64) float64 {
h.h.RLock()
defer h.h.RUnlock()
return h.h.Quantile(q) return h.h.Quantile(q)
} }
@ -159,9 +180,17 @@ func (h *Histogram) LabelValues() []string {
// Print writes a string representation of the histogram to the passed writer. // Print writes a string representation of the histogram to the passed writer.
// Useful for printing to a terminal. // Useful for printing to a terminal.
func (h *Histogram) Print(w io.Writer) { func (h *Histogram) Print(w io.Writer) {
h.h.RLock()
defer h.h.RUnlock()
fmt.Fprintf(w, h.h.String()) fmt.Fprintf(w, h.h.String())
} }
// safeHistogram exists as gohistogram.Histogram is not goroutine-safe.
// Callers hold the embedded RWMutex around every access: a write lock
// for Add, a read lock for Quantile and String.
type safeHistogram struct {
	sync.RWMutex
	gohistogram.Histogram
}
// Bucket is a range in a histogram which aggregates observations. // Bucket is a range in a histogram which aggregates observations.
type Bucket struct { type Bucket struct {
From, To, Count int64 From, To, Count int64

View file

@ -66,6 +66,7 @@ func (in *Influx) NewGauge(name string) *Gauge {
return &Gauge{ return &Gauge{
name: name, name: name,
obs: in.gauges.Observe, obs: in.gauges.Observe,
add: in.gauges.Add,
} }
} }
@ -168,10 +169,14 @@ func mergeTags(tags map[string]string, labelValues []string) map[string]string {
if len(labelValues)%2 != 0 { if len(labelValues)%2 != 0 {
panic("mergeTags received a labelValues with an odd number of strings") panic("mergeTags received a labelValues with an odd number of strings")
} }
for i := 0; i < len(labelValues); i += 2 { ret := make(map[string]string, len(tags)+len(labelValues)/2)
tags[labelValues[i]] = labelValues[i+1] for k, v := range tags {
ret[k] = v
} }
return tags for i := 0; i < len(labelValues); i += 2 {
ret[labelValues[i]] = labelValues[i+1]
}
return ret
} }
func sum(a []float64) float64 { func sum(a []float64) float64 {
@ -216,6 +221,7 @@ type Gauge struct {
name string name string
lvs lv.LabelValues lvs lv.LabelValues
obs observeFunc obs observeFunc
add observeFunc
} }
// With implements metrics.Gauge. // With implements metrics.Gauge.
@ -224,6 +230,7 @@ func (g *Gauge) With(labelValues ...string) metrics.Gauge {
name: g.name, name: g.name,
lvs: g.lvs.With(labelValues...), lvs: g.lvs.With(labelValues...),
obs: g.obs, obs: g.obs,
add: g.add,
} }
} }
@ -232,6 +239,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value) g.obs(g.name, g.lvs, value)
} }
// Add implements metrics.Gauge. The delta is forwarded to the Influx
// object's gauge space via the add callback, which accumulates it onto
// the last recorded observation for this timeseries.
func (g *Gauge) Add(delta float64) {
	g.add(g.name, g.lvs, delta)
}
// Histogram is an Influx histrogram. Observations are aggregated into a // Histogram is an Influx histrogram. Observations are aggregated into a
// generic.Histogram and emitted as per-quantile gauges to the Influx server. // generic.Histogram and emitted as per-quantile gauges to the Influx server.
type Histogram struct { type Histogram struct {

View file

@ -21,6 +21,13 @@ func (s *Space) Observe(name string, lvs LabelValues, value float64) {
s.nodeFor(name).observe(lvs, value) s.nodeFor(name).observe(lvs, value)
} }
// Add locates the time series identified by the name and label values in
// the vector space, and appends the delta to the last value in the list of
// observations (starting from the delta itself if no observation exists).
func (s *Space) Add(name string, lvs LabelValues, delta float64) {
	s.nodeFor(name).add(lvs, delta)
}
// Walk traverses the vector space and invokes fn for each non-empty time series // Walk traverses the vector space and invokes fn for each non-empty time series
// which is encountered. Return false to abort the traversal. // which is encountered. Return false to abort the traversal.
func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) {
@ -91,6 +98,34 @@ func (n *node) observe(lvs LabelValues, value float64) {
child.observe(tail, value) child.observe(tail, value)
} }
// add descends the label trie one label/value pair per level and, at the
// terminal node, appends (last observation + delta) to the observation
// list — or delta itself when the list is empty.
func (n *node) add(lvs LabelValues, delta float64) {
	n.mtx.Lock()
	defer n.mtx.Unlock()
	if len(lvs) == 0 {
		next := delta
		if len(n.observations) > 0 {
			next += last(n.observations)
		}
		n.observations = append(n.observations, next)
		return
	}
	if len(lvs) < 2 {
		// Labels must come in name/value pairs.
		panic("too few LabelValues; programmer error!")
	}
	head, rest := pair{lvs[0], lvs[1]}, lvs[2:]
	if n.children == nil {
		n.children = map[pair]*node{}
	}
	c, ok := n.children[head]
	if !ok {
		c = &node{}
		n.children[head] = c
	}
	c.add(rest, delta)
}
func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool {
n.mtx.RLock() n.mtx.RLock()
defer n.mtx.RUnlock() defer n.mtx.RUnlock()
@ -104,3 +139,7 @@ func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool
} }
return true return true
} }
// last returns the final element of a. It panics on an empty slice;
// callers only invoke it when at least one observation exists.
func last(a []float64) float64 {
	n := len(a)
	return a[n-1]
}

View file

@ -12,6 +12,7 @@ type Counter interface {
type Gauge interface { type Gauge interface {
With(labelValues ...string) Gauge With(labelValues ...string) Gauge
Set(value float64) Set(value float64)
Add(delta float64)
} }
// Histogram describes a metric that takes repeated observations of the same // Histogram describes a metric that takes repeated observations of the same

View file

@ -54,6 +54,13 @@ func (g Gauge) With(labelValues ...string) metrics.Gauge {
return next return next
} }
// Add implements metrics.Gauge. The delta is forwarded to every gauge
// in the collection.
func (g Gauge) Add(delta float64) {
	for _, gauge := range g {
		gauge.Add(delta)
	}
}
// Histogram collects multiple individual histograms and treats them as a unit. // Histogram collects multiple individual histograms and treats them as a unit.
type Histogram []metrics.Histogram type Histogram []metrics.Histogram

View file

@ -74,6 +74,7 @@ func (s *Statsd) NewGauge(name string) *Gauge {
return &Gauge{ return &Gauge{
name: s.prefix + name, name: s.prefix + name,
obs: s.gauges.Observe, obs: s.gauges.Observe,
add: s.gauges.Add,
} }
} }
@ -201,6 +202,7 @@ func (c *Counter) Add(delta float64) {
type Gauge struct { type Gauge struct {
name string name string
obs observeFunc obs observeFunc
add observeFunc
} }
// With is a no-op. // With is a no-op.
@ -213,6 +215,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, lv.LabelValues{}, value) g.obs(g.name, lv.LabelValues{}, value)
} }
// Add implements metrics.Gauge. The delta is forwarded to the Statsd
// object's gauge space via the add callback, with no label values
// (StatsD does not support labels).
func (g *Gauge) Add(delta float64) {
	g.add(g.name, lv.LabelValues{}, delta)
}
// Timing is a StatsD timing, or metrics.Histogram. Observations are // Timing is a StatsD timing, or metrics.Histogram. Observations are
// forwarded to a Statsd object, and collected (but not aggregated) per // forwarded to a Statsd object, and collected (but not aggregated) per
// timeseries. // timeseries.

View file

@ -7,6 +7,7 @@ import "time"
type Timer struct { type Timer struct {
h Histogram h Histogram
t time.Time t time.Time
u time.Duration
} }
// NewTimer wraps the given histogram and records the current time. // NewTimer wraps the given histogram and records the current time.
@ -14,15 +15,22 @@ func NewTimer(h Histogram) *Timer {
return &Timer{ return &Timer{
h: h, h: h,
t: time.Now(), t: time.Now(),
u: time.Second,
} }
} }
// ObserveDuration captures the number of seconds since the timer was // ObserveDuration captures the number of seconds since the timer was
// constructed, and forwards that observation to the histogram. // constructed, and forwards that observation to the histogram.
func (t *Timer) ObserveDuration() { func (t *Timer) ObserveDuration() {
d := time.Since(t.t).Seconds() d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
if d < 0 { if d < 0 {
d = 0 d = 0
} }
t.h.Observe(d) t.h.Observe(d)
} }
// Unit sets the unit of the float64 emitted by the timer.
// By default, the timer emits seconds.
// NOTE(review): plain field write — presumably called before the timer
// is shared across goroutines; confirm callers don't race with
// ObserveDuration.
func (t *Timer) Unit(u time.Duration) {
	t.u = u
}