Fix duplicated tags in InfluxDB

This commit is contained in:
Michael 2018-04-16 10:28:04 +02:00 committed by Traefiker Bot
parent 749d833f65
commit ebd77f314d
20 changed files with 530 additions and 209 deletions

7
Gopkg.lock generated
View file

@@ -569,8 +569,8 @@
"metrics/statsd", "metrics/statsd",
"util/conn" "util/conn"
] ]
revision = "f66b0e13579bfc5a48b9e2a94b1209c107ea1f41" revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e"
version = "v0.3.0" version = "v0.7.0"
[[projects]] [[projects]]
name = "github.com/go-logfmt/logfmt" name = "github.com/go-logfmt/logfmt"
@@ -761,6 +761,7 @@
version = "v1.3.7" version = "v1.3.7"
[[projects]] [[projects]]
branch = "master"
name = "github.com/jjcollinge/servicefabric" name = "github.com/jjcollinge/servicefabric"
packages = ["."] packages = ["."]
revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe" revision = "8eebe170fa1ba25d3dfb928b3f86a7313b13b9fe"
@@ -1674,6 +1675,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "ffd938a3cb26d3d0926b34a98708af0a3db49311e03edc91649ea1baad1b97e9" inputs-digest = "3142954e49f77ea41aea160bb9b57c0281c31f6de6652bbf3ec98ea3bc8fc494"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View file

@@ -97,7 +97,7 @@
[[constraint]] [[constraint]]
name = "github.com/go-kit/kit" name = "github.com/go-kit/kit"
version = "0.3.0" version = "0.7.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"

View file

@@ -24,21 +24,21 @@ func TestInfluxDB(t *testing.T) {
} }
expectedBackend := []string{ expectedBackend := []string{
`(traefik\.backend\.requests\.total,code=200,method=GET,service=test count=1) [\d]{19}`, `(traefik\.backend\.requests\.total,backend=test,code=200,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.requests\.total,code=404,method=GET,service=test count=1) [\d]{19}`, `(traefik\.backend\.requests\.total,backend=test,code=404,method=GET count=1) [\d]{19}`,
`(traefik\.backend\.request\.duration(?:,backend=test)?,code=200,method=GET,service=test(?:,url=http://127.0.0.1)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`, `(traefik\.backend\.request\.duration,backend=test,code=200 p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,service=test count=2) [\d]{19}`, `(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,backend=test count=2) [\d]{19}`,
`(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`, `(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`, `(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
`(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`, `(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
} }
msgBackend := udp.ReceiveString(t, func() { msgBackend := udp.ReceiveString(t, func() {
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1) influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1) influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1) influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendRetriesCounter().With("service", "test").Add(1) influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
influxDBRegistry.BackendReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000) influxDBRegistry.BackendReqDurationHistogram().With("backend", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
influxDBRegistry.ConfigReloadsCounter().Add(1) influxDBRegistry.ConfigReloadsCounter().Add(1)
influxDBRegistry.ConfigReloadsFailureCounter().Add(1) influxDBRegistry.ConfigReloadsFailureCounter().Add(1)
influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1) influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1)
@@ -47,9 +47,9 @@ func TestInfluxDB(t *testing.T) {
assertMessage(t, msgBackend, expectedBackend) assertMessage(t, msgBackend, expectedBackend)
expectedEntrypoint := []string{ expectedEntrypoint := []string{
`(traefik\.entrypoint\.requests\.total(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`, `(traefik\.entrypoint\.requests\.total,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
`(traefik\.entrypoint\.request\.duration(?:,backend=test,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`, `(traefik\.entrypoint\.request\.duration(?:,code=[\d]{3})?,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
`(traefik\.entrypoint\.connections\.open(?:[a-z=0-9A-Z,]+)?,entrypoint=test,(?:[a-z=0-9A-Z,:.//]+)? value=1) [\d]{19}`, `(traefik\.entrypoint\.connections\.open,entrypoint=test value=1) [\d]{19}`,
} }
msgEntrypoint := udp.ReceiveString(t, func() { msgEntrypoint := udp.ReceiveString(t, func() {

View file

@@ -335,6 +335,12 @@ func (g *gauge) With(labelValues ...string) metrics.Gauge {
} }
} }
func (g *gauge) Add(delta float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Add(delta)
g.collectors <- newCollector(g.name, g.labelNamesValues, collector)
}
func (g *gauge) Set(value float64) { func (g *gauge) Set(value float64) {
collector := g.gv.With(g.labelNamesValues.ToLabels()) collector := g.gv.With(g.labelNamesValues.ToLabels())
collector.Set(value) collector.Set(value)

View file

@@ -32,7 +32,12 @@ func (g *CollectingGauge) With(labelValues ...string) metrics.Gauge {
} }
// Set is there to satisfy the metrics.Gauge interface. // Set is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Set(delta float64) { func (g *CollectingGauge) Set(value float64) {
g.GaugeValue = value
}
// Add is there to satisfy the metrics.Gauge interface.
func (g *CollectingGauge) Add(delta float64) {
g.GaugeValue = delta g.GaugeValue = delta
} }

View file

@@ -35,14 +35,15 @@
// idea to log simple values without formatting them. This practice allows // idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way. // the chosen logger to encode values in the most appropriate way.
// //
// Log Context // Contextual Loggers
// //
// A log context stores keyvals that it includes in all log events. Building // A contextual logger stores keyvals that it includes in all log events.
// appropriate log contexts reduces repetition and aids consistency in the // Building appropriate contextual loggers reduces repetition and aids
// resulting log output. We can use a context to improve the RunTask example. // consistency in the resulting log output. With and WithPrefix add context to
// a logger. We can use With to improve the RunTask example.
// //
// func RunTask(task Task, logger log.Logger) string { // func RunTask(task Task, logger log.Logger) string {
// logger = log.NewContext(logger).With("taskID", task.ID) // logger = log.With(logger, "taskID", task.ID)
// logger.Log("event", "starting task") // logger.Log("event", "starting task")
// ... // ...
// taskHelper(task.Cmd, logger) // taskHelper(task.Cmd, logger)
@@ -51,19 +52,18 @@
// } // }
// //
// The improved version emits the same log events as the original for the // The improved version emits the same log events as the original for the
// first and last calls to Log. The call to taskHelper highlights that a // first and last calls to Log. Passing the contextual logger to taskHelper
// context may be passed as a logger to other functions. Each log event // enables each log event created by taskHelper to include the task.ID even
// created by the called function will include the task.ID even though the // though taskHelper does not have access to that value. Using contextual
// function does not have access to that value. Using log contexts this way // loggers this way simplifies producing log output that enables tracing the
// simplifies producing log output that enables tracing the life cycle of // life cycle of individual tasks. (See the Contextual example for the full
// individual tasks. (See the Context example for the full code of the // code of the above snippet.)
// above snippet.)
// //
// Dynamic Context Values // Dynamic Contextual Values
// //
// A Valuer function stored in a log context generates a new value each time // A Valuer function stored in a contextual logger generates a new value each
// the context logs an event. The Valuer example demonstrates how this // time an event is logged. The Valuer example demonstrates how this feature
// feature works. // works.
// //
// Valuers provide the basis for consistently logging timestamps and source // Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose. // code location. The log package defines several valuers for that purpose.
@@ -72,7 +72,7 @@
// entries contain a timestamp and source location looks like this: // entries contain a timestamp and source location looks like this:
// //
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) // logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) // logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
// //
// Concurrent Safety // Concurrent Safety
// //
@@ -90,4 +90,27 @@
// handled atomically within the wrapped logger, but it typically serializes // handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting // both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event. // logger may perform multiple writes per log event.
//
// Error Handling
//
// This package relies on the practice of wrapping or decorating loggers with
// other loggers to provide composable pieces of functionality. It also means
// that Logger.Log must return an error because some
// implementations—especially those that output log data to an io.Writer—may
// encounter errors that cannot be handled locally. This in turn means that
// Loggers that wrap other loggers should return errors from the wrapped
// logger up the stack.
//
// Fortunately, the decorator pattern also provides a way to avoid the
// necessity to check for errors every time an application calls Logger.Log.
// An application required to panic whenever its Logger encounters
// an error could initialize its logger as follows.
//
// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger := log.LoggerFunc(func(keyvals ...interface{}) error {
// if err := fmtlogger.Log(keyvals...); err != nil {
// panic(err)
// }
// return nil
// })
package log package log

View file

@@ -44,9 +44,6 @@ func merge(dst map[string]interface{}, k, v interface{}) {
default: default:
key = fmt.Sprint(x) key = fmt.Sprint(x)
} }
if x, ok := v.(error); ok {
v = safeError(x)
}
// We want json.Marshaler and encoding.TextMarshaller to take priority over // We want json.Marshaler and encoding.TextMarshaller to take priority over
// err.Error() and v.String(). But json.Marshall (called later) does that by // err.Error() and v.String(). But json.Marshall (called later) does that by

View file

@@ -6,7 +6,7 @@ import "errors"
// log event from keyvals, a variadic sequence of alternating keys and values. // log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In // Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or // particular, any implementation of Logger that appends to keyvals or
// modifies any of its elements must make a copy first. // modifies or retains any of its elements must make a copy first.
type Logger interface { type Logger interface {
Log(keyvals ...interface{}) error Log(keyvals ...interface{}) error
} }
@@ -15,62 +15,100 @@ type Logger interface {
// the missing value. // the missing value.
var ErrMissingValue = errors.New("(MISSING)") var ErrMissingValue = errors.New("(MISSING)")
// NewContext returns a new Context that logs to logger. // With returns a new contextual logger with keyvals prepended to those passed
func NewContext(logger Logger) *Context { // to calls to Log. If logger is also a contextual logger created by With or
if c, ok := logger.(*Context); ok { // WithPrefix, keyvals is appended to the existing context.
return c //
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func With(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
} }
return &Context{logger: logger}
} }
// Context must always have the same number of stack frames between calls to // WithPrefix returns a new contextual logger with keyvals prepended to those
// passed to calls to Log. If logger is also a contextual logger created by
// With or WithPrefix, keyvals is prepended to the existing context.
//
// The returned Logger replaces all value elements (odd indexes) containing a
// Valuer with their generated value for each call to its Log method.
func WithPrefix(logger Logger, keyvals ...interface{}) Logger {
if len(keyvals) == 0 {
return logger
}
l := newContext(logger)
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// context is the Logger implementation returned by With and WithPrefix. It
// wraps a Logger and holds keyvals that it includes in all log events. Its
// Log method calls bindValues to generate values for each Valuer in the
// context keyvals.
//
// A context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This // its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to // requirement comes from the functional requirement to allow a context to
// resolve application call site information for a log.Caller stored in the // resolve application call site information for a Caller stored in the
// context. To do this we must be able to predict the number of logging // context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called. // functions on the stack when bindValues is called.
// //
// Three implementation details provide the needed stack depth consistency. // Two implementation details provide the needed stack depth consistency.
// The first two of these details also result in better amortized performance,
// and thus make sense even without the requirements regarding stack depth.
// The third detail, however, is subtle and tied to the implementation of the
// Go compiler.
// //
// 1. NewContext avoids introducing an additional layer when asked to // 1. newContext avoids introducing an additional layer when asked to
// wrap another Context. // wrap another context.
// 2. With avoids introducing an additional layer by returning a newly // 2. With and WithPrefix avoid introducing an additional layer by
// constructed Context with a merged keyvals rather than simply // returning a newly constructed context with a merged keyvals rather
// wrapping the existing Context. // than simply wrapping the existing context.
// 3. All of Context's methods take pointer receivers even though they type context struct {
// do not mutate the Context.
//
// Before explaining the last detail, first some background. The Go compiler
// generates wrapper methods to implement the auto dereferencing behavior when
// calling a value method through a pointer variable. These wrapper methods
// are also used when calling a value method through an interface variable
// because interfaces store a pointer to the underlying concrete value.
// Calling a pointer receiver through an interface does not require generating
// an additional function.
//
// If Context had value methods then calling Context.Log through a variable
// with type Logger would have an extra stack frame compared to calling
// Context.Log through a variable with type Context. Using pointer receivers
// avoids this problem.
// A Context wraps a Logger and holds keyvals that it includes in all log
// events. When logging, a Context replaces all value elements (odd indexes)
// containing a Valuer with their generated value for each call to its Log
// method.
type Context struct {
logger Logger logger Logger
keyvals []interface{} keyvals []interface{}
hasValuer bool hasValuer bool
} }
func newContext(logger Logger) *context {
if c, ok := logger.(*context); ok {
return c
}
return &context{logger: logger}
}
// Log replaces all value elements (odd indexes) containing a Valuer in the // Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the // stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger. // result to the wrapped Logger.
func (l *Context) Log(keyvals ...interface{}) error { func (l *context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...) kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 { if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue) kvs = append(kvs, ErrMissingValue)
@@ -86,53 +124,6 @@ func (l *Context) Log(keyvals ...interface{}) error {
return l.logger.Log(kvs...) return l.logger.Log(kvs...)
} }
// With returns a new Context with keyvals appended to those of the receiver.
func (l *Context) With(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &Context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new Context with keyvals prepended to those of the
// receiver.
func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &Context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If // LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger // f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f. // object that calls f.

View file

@@ -39,7 +39,7 @@ func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key } return func(a *StdlibAdapter) { a.timestampKey = key }
} }
// FileKey sets the key for the file and line field. By default, it's "file". // FileKey sets the key for the file and line field. By default, it's "caller".
func FileKey(key string) StdlibAdapterOption { func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key } return func(a *StdlibAdapter) { a.fileKey = key }
} }
@@ -55,7 +55,7 @@ func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{ a := StdlibAdapter{
Logger: logger, Logger: logger,
timestampKey: "ts", timestampKey: "ts",
fileKey: "file", fileKey: "caller",
messageKey: "msg", messageKey: "msg",
} }
for _, option := range options { for _, option := range options {

View file

@@ -36,24 +36,59 @@ func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger}) l.logger.Store(loggerStruct{logger})
} }
// SyncWriter synchronizes concurrent writes to an io.Writer. // NewSyncWriter returns a new writer that is safe for concurrent use by
type SyncWriter struct { // multiple goroutines. Writes to the returned writer are passed on to w. If
mu sync.Mutex // another write is already in progress, the calling goroutine blocks until
w io.Writer // the writer is available.
//
// If w implements the following interface, so does the returned writer.
//
// interface {
// Fd() uintptr
// }
func NewSyncWriter(w io.Writer) io.Writer {
switch w := w.(type) {
case fdWriter:
return &fdSyncWriter{fdWriter: w}
default:
return &syncWriter{Writer: w}
}
} }
// NewSyncWriter returns a new SyncWriter. The returned writer is safe for // syncWriter synchronizes concurrent writes to an io.Writer.
// concurrent use by multiple goroutines. type syncWriter struct {
func NewSyncWriter(w io.Writer) *SyncWriter { sync.Mutex
return &SyncWriter{w: w} io.Writer
} }
// Write writes p to the underlying io.Writer. If another write is already in // Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the SyncWriter is available. // progress, the calling goroutine blocks until the syncWriter is available.
func (w *SyncWriter) Write(p []byte) (n int, err error) { func (w *syncWriter) Write(p []byte) (n int, err error) {
w.mu.Lock() w.Lock()
n, err = w.w.Write(p) n, err = w.Writer.Write(p)
w.mu.Unlock() w.Unlock()
return n, err
}
// fdWriter is an io.Writer that also has an Fd method. The most common
// example of an fdWriter is an *os.File.
type fdWriter interface {
io.Writer
Fd() uintptr
}
// fdSyncWriter synchronizes concurrent writes to an fdWriter.
type fdSyncWriter struct {
sync.Mutex
fdWriter
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the fdSyncWriter is available.
func (w *fdSyncWriter) Write(p []byte) (n int, err error) {
w.Lock()
n, err = w.fdWriter.Write(p)
w.Unlock()
return n, err return n, err
} }

View file

@@ -6,9 +6,9 @@ import (
"github.com/go-stack/stack" "github.com/go-stack/stack"
) )
// A Valuer generates a log value. When passed to Context.With in a value // A Valuer generates a log value. When passed to With or WithPrefix in a
// element (odd indexes), it represents a dynamic value which is re-evaluated // value element (odd indexes), it represents a dynamic value which is re-
// with each log event. // evaluated with each log event.
type Valuer func() interface{} type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer // bindValues replaces all value elements (odd indexes) containing a Valuer
@@ -32,22 +32,51 @@ func containsValuer(keyvals []interface{}) bool {
return false return false
} }
// Timestamp returns a Valuer that invokes the underlying function when bound, // Timestamp returns a timestamp Valuer. It invokes the t function to get the
// returning a time.Time. Users will probably want to use DefaultTimestamp or // time; unless you are doing something tricky, pass time.Now.
// DefaultTimestampUTC. //
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func Timestamp(t func() time.Time) Valuer { func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() } return func() interface{} { return t() }
} }
var ( // TimestampFormat returns a timestamp Valuer with a custom time format. It
// DefaultTimestamp is a Valuer that returns the current wallclock time, // invokes the t function to get the time to format; unless you are doing
// respecting time zones, when bound. // something tricky, pass time.Now. The layout string is passed to
DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) } // Time.Format.
//
// Most users will want to use DefaultTimestamp or DefaultTimestampUTC, which
// are TimestampFormats that use the RFC3339Nano format.
func TimestampFormat(t func() time.Time, layout string) Valuer {
return func() interface{} {
return timeFormat{
time: t(),
layout: layout,
}
}
}
// DefaultTimestampUTC is a Valuer that returns the current time in UTC // A timeFormat represents an instant in time and a layout used when
// when bound. // marshaling to a text format.
DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) } type timeFormat struct {
) time time.Time
layout string
}
func (tf timeFormat) String() string {
return tf.time.Format(tf.layout)
}
// MarshalText implements encoding.TextMarshaller.
func (tf timeFormat) MarshalText() (text []byte, err error) {
// The following code adapted from the standard library time.Time.Format
// method. Using the same undocumented magic constant to extend the size
// of the buffer as seen there.
b := make([]byte, 0, len(tf.layout)+10)
b = tf.time.AppendFormat(b, tf.layout)
return b, nil
}
// Caller returns a Valuer that returns a file and line from a specified depth // Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller. // in the callstack. Users will probably want to use DefaultCaller.
@@ -56,6 +85,17 @@ func Caller(depth int) Valuer {
} }
var ( var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp = TimestampFormat(time.Now, time.RFC3339Nano)
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC = TimestampFormat(
func() time.Time { return time.Now().UTC() },
time.RFC3339Nano,
)
// DefaultCaller is a Valuer that returns the file and line where the Log // DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With. // method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3) DefaultCaller = Caller(3)

View file

@@ -1,23 +1,54 @@
// Package metrics provides a framework for application instrumentation. All // Package metrics provides a framework for application instrumentation. It's
// metrics are safe for concurrent use. Considerable design influence has been // primarily designed to help you get started with good and robust
// taken from https://github.com/codahale/metrics and https://prometheus.io. // instrumentation, and to help you migrate from a less-capable system like
// Graphite to a more-capable system like Prometheus. If your organization has
// already standardized on an instrumentation system like Prometheus, and has no
// plans to change, it may make sense to use that system's instrumentation
// library directly.
// //
// This package contains the common interfaces. Your code should take these // This package provides three core metric abstractions (Counter, Gauge, and
// interfaces as parameters. Implementations are provided for different // Histogram) and implementations for almost all common instrumentation
// instrumentation systems in the various subdirectories. // backends. Each metric has an observation method (Add, Set, or Observe,
// respectively) used to record values, and a With method to "scope" the
// observation by various parameters. For example, you might have a Histogram to
// record request durations, parameterized by the method that's being called.
//
// var requestDuration metrics.Histogram
// // ...
// requestDuration.With("method", "MyMethod").Observe(time.Since(begin))
//
// This allows a single high-level metrics object (requestDuration) to work with
// many code paths somewhat dynamically. The concept of With is fully supported
// in some backends like Prometheus, and not supported in other backends like
// Graphite. So, With may be a no-op, depending on the concrete implementation
// you choose. Please check the implementation to know for sure. For
// implementations that don't provide With, it's necessary to fully parameterize
// each metric in the metric name, e.g.
//
// // Statsd
// c := statsd.NewCounter("request_duration_MyMethod_200")
// c.Add(1)
//
// // Prometheus
// c := prometheus.NewCounter(stdprometheus.CounterOpts{
// Name: "request_duration",
// ...
// }, []string{"method", "status_code"})
// c.With("method", "MyMethod", "status_code", strconv.Itoa(code)).Add(1)
// //
// Usage // Usage
// //
// Metrics are dependencies and should be passed to the components that need // Metrics are dependencies, and should be passed to the components that need
// them in the same way you'd construct and pass a database handle, or reference // them in the same way you'd construct and pass a database handle, or reference
// to another component. So, create metrics in your func main, using whichever // to another component. Metrics should *not* be created in the global scope.
// concrete implementation is appropriate for your organization. // Instead, instantiate metrics in your func main, using whichever concrete
// implementation is appropriate for your organization.
// //
// latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{ // latency := prometheus.NewSummaryFrom(stdprometheus.SummaryOpts{
// Namespace: "myteam", // Namespace: "myteam",
// Subsystem: "foosvc", // Subsystem: "foosvc",
// Name: "request_latency_seconds", // Name: "request_latency_seconds",
// Help: "Incoming request latency in seconds." // Help: "Incoming request latency in seconds.",
// }, []string{"method", "status_code"}) // }, []string{"method", "status_code"})
// //
// Write your components to take the metrics they will use as parameters to // Write your components to take the metrics they will use as parameters to
@@ -40,8 +71,14 @@
// api := NewAPI(store, logger, latency) // api := NewAPI(store, logger, latency)
// http.ListenAndServe("/", api) // http.ListenAndServe("/", api)
// //
// Note that metrics are "write-only" interfaces.
//
// Implementation details // Implementation details
// //
// All metrics are safe for concurrent use. Considerable design influence has
// been taken from https://github.com/codahale/metrics and
// https://prometheus.io.
//
// Each telemetry system has different semantics for label values, push vs. // Each telemetry system has different semantics for label values, push vs.
// pull, support for histograms, etc. These properties influence the design of // pull, support for histograms, etc. These properties influence the design of
// their respective packages. This table attempts to summarize the key points of // their respective packages. This table attempts to summarize the key points of
@ -54,7 +91,7 @@
// expvar 1 atomic atomic synthetic, batch, in-place expose // expvar 1 atomic atomic synthetic, batch, in-place expose
// influx n custom custom custom // influx n custom custom custom
// prometheus n native native native // prometheus n native native native
// circonus 1 native native native
// pcp 1 native native native // pcp 1 native native native
// cloudwatch n batch push-aggregate batch push-aggregate synthetic, batch, push-aggregate
// //
package metrics package metrics

View file

@ -14,10 +14,13 @@ import (
"fmt" "fmt"
"io" "io"
"strings" "strings"
"sync"
"sync/atomic"
"time" "time"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/generic"
"github.com/go-kit/kit/metrics/internal/lv" "github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap" "github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn" "github.com/go-kit/kit/util/conn"
@ -34,53 +37,63 @@ import (
// To regularly report metrics to an io.Writer, use the WriteLoop helper method. // To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a DogStatsD server, use the SendLoop helper method. // To send to a DogStatsD server, use the SendLoop helper method.
type Dogstatsd struct { type Dogstatsd struct {
mtx sync.RWMutex
prefix string prefix string
rates *ratemap.RateMap rates *ratemap.RateMap
counters *lv.Space counters *lv.Space
gauges *lv.Space gauges map[string]*gaugeNode
timings *lv.Space timings *lv.Space
histograms *lv.Space histograms *lv.Space
logger log.Logger logger log.Logger
lvs lv.LabelValues
} }
// New returns a Dogstatsd object that may be used to create metrics. Prefix is // New returns a Dogstatsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to // applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods. // WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Dogstatsd { func New(prefix string, logger log.Logger, lvs ...string) *Dogstatsd {
if len(lvs)%2 != 0 {
panic("odd number of LabelValues; programmer error!")
}
return &Dogstatsd{ return &Dogstatsd{
prefix: prefix, prefix: prefix,
rates: ratemap.New(), rates: ratemap.New(),
counters: lv.NewSpace(), counters: lv.NewSpace(),
gauges: lv.NewSpace(), gauges: map[string]*gaugeNode{},
timings: lv.NewSpace(), timings: lv.NewSpace(),
histograms: lv.NewSpace(), histograms: lv.NewSpace(),
logger: logger, logger: logger,
lvs: lvs,
} }
} }
// NewCounter returns a counter, sending observations to this Dogstatsd object. // NewCounter returns a counter, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter { func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Counter{ return &Counter{
name: d.prefix + name, name: name,
obs: d.counters.Observe, obs: d.counters.Observe,
} }
} }
// NewGauge returns a gauge, sending observations to this Dogstatsd object. // NewGauge returns a gauge, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewGauge(name string) *Gauge { func (d *Dogstatsd) NewGauge(name string) *Gauge {
return &Gauge{ d.mtx.Lock()
name: d.prefix + name, n, ok := d.gauges[name]
obs: d.gauges.Observe, if !ok {
n = &gaugeNode{gauge: &Gauge{g: generic.NewGauge(name), ddog: d}}
d.gauges[name] = n
} }
d.mtx.Unlock()
return n.gauge
} }
// NewTiming returns a histogram whose observations are interpreted as // NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Dogstatsd object. // millisecond durations, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing { func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Timing{ return &Timing{
name: d.prefix + name, name: name,
obs: d.timings.Observe, obs: d.timings.Observe,
} }
} }
@ -88,9 +101,9 @@ func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
// NewHistogram returns a histogram whose observations are of an unspecified // NewHistogram returns a histogram whose observations are of an unspecified
// unit, and are forwarded to this Dogstatsd object. // unit, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram { func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
d.rates.Set(d.prefix+name, sampleRate) d.rates.Set(name, sampleRate)
return &Histogram{ return &Histogram{
name: d.prefix + name, name: name,
obs: d.histograms.Observe, obs: d.histograms.Observe,
} }
} }
@ -124,7 +137,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
var n int var n int
d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|c%s%s\n", d.prefix, name, sum(values), sampling(d.rates.Get(name)), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -135,22 +148,23 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
return count, err return count, err
} }
d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.mtx.RLock()
n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs)) for _, root := range d.gauges {
if err != nil { root.walk(func(name string, lvs lv.LabelValues, value float64) bool {
return false n, err = fmt.Fprintf(w, "%s%s:%f|g%s\n", d.prefix, name, value, d.tagValues(lvs))
} if err != nil {
count += int64(n) return false
return true }
}) count += int64(n)
if err != nil { return true
return count, err })
} }
d.mtx.RUnlock()
d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name) sampleRate := d.rates.Get(name)
for _, value := range values { for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|ms%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -165,7 +179,7 @@ func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name) sampleRate := d.rates.Get(name)
for _, value := range values { for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs)) n, err = fmt.Fprintf(w, "%s%s:%f|h%s%s\n", d.prefix, name, value, sampling(sampleRate), d.tagValues(lvs))
if err != nil { if err != nil {
return false return false
} }
@ -200,14 +214,17 @@ func sampling(r float64) string {
return sv return sv
} }
func tagValues(labelValues []string) string { func (d *Dogstatsd) tagValues(labelValues []string) string {
if len(labelValues) == 0 { if len(labelValues) == 0 && len(d.lvs) == 0 {
return "" return ""
} }
if len(labelValues)%2 != 0 { if len(labelValues)%2 != 0 {
panic("tagValues received a labelValues with an odd number of strings") panic("tagValues received a labelValues with an odd number of strings")
} }
pairs := make([]string, 0, len(labelValues)/2) pairs := make([]string, 0, (len(d.lvs)+len(labelValues))/2)
for i := 0; i < len(d.lvs); i += 2 {
pairs = append(pairs, d.lvs[i]+":"+d.lvs[i+1])
}
for i := 0; i < len(labelValues); i += 2 { for i := 0; i < len(labelValues); i += 2 {
pairs = append(pairs, labelValues[i]+":"+labelValues[i+1]) pairs = append(pairs, labelValues[i]+":"+labelValues[i+1])
} }
@ -241,23 +258,31 @@ func (c *Counter) Add(delta float64) {
// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd // Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd
// object, and aggregated (the last observation selected) per timeseries. // object, and aggregated (the last observation selected) per timeseries.
type Gauge struct { type Gauge struct {
name string g *generic.Gauge
lvs lv.LabelValues ddog *Dogstatsd
obs observeFunc set int32
} }
// With implements metrics.Gauge. // With implements metrics.Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge { func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{ g.ddog.mtx.RLock()
name: g.name, node := g.ddog.gauges[g.g.Name]
lvs: g.lvs.With(labelValues...), g.ddog.mtx.RUnlock()
obs: g.obs,
} ga := &Gauge{g: g.g.With(labelValues...).(*generic.Gauge), ddog: g.ddog}
return node.addGauge(ga, ga.g.LabelValues())
} }
// Set implements metrics.Gauge. // Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) { func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value) g.g.Set(value)
g.touch()
}
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.g.Add(delta)
g.touch()
} }
// Timing is a DogStatsD timing, or metrics.Histogram. Observations are // Timing is a DogStatsD timing, or metrics.Histogram. Observations are
@ -304,3 +329,61 @@ func (h *Histogram) With(labelValues ...string) metrics.Histogram {
func (h *Histogram) Observe(value float64) { func (h *Histogram) Observe(value float64) {
h.obs(h.name, h.lvs, value) h.obs(h.name, h.lvs, value)
} }
type pair struct{ label, value string }
type gaugeNode struct {
mtx sync.RWMutex
gauge *Gauge
children map[pair]*gaugeNode
}
func (n *gaugeNode) addGauge(g *Gauge, lvs lv.LabelValues) *Gauge {
n.mtx.Lock()
defer n.mtx.Unlock()
if len(lvs) == 0 {
if n.gauge == nil {
n.gauge = g
}
return n.gauge
}
if len(lvs) < 2 {
panic("too few LabelValues; programmer error!")
}
head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
if n.children == nil {
n.children = map[pair]*gaugeNode{}
}
child, ok := n.children[head]
if !ok {
child = &gaugeNode{}
n.children[head] = child
}
return child.addGauge(g, tail)
}
func (n *gaugeNode) walk(fn func(string, lv.LabelValues, float64) bool) bool {
n.mtx.RLock()
defer n.mtx.RUnlock()
if n.gauge != nil {
value, ok := n.gauge.read()
if ok && !fn(n.gauge.g.Name, n.gauge.g.LabelValues(), value) {
return false
}
}
for _, child := range n.children {
if !child.walk(fn) {
return false
}
}
return true
}
func (g *Gauge) touch() {
atomic.StoreInt32(&(g.set), 1)
}
func (g *Gauge) read() (float64, bool) {
set := atomic.SwapInt32(&(g.set), 0)
return g.g.Value(), set != 0
}

View file

@ -33,6 +33,7 @@ func NewCounter(name string) *Counter {
// With implements Counter. // With implements Counter.
func (c *Counter) With(labelValues ...string) metrics.Counter { func (c *Counter) With(labelValues ...string) metrics.Counter {
return &Counter{ return &Counter{
Name: c.Name,
bits: atomic.LoadUint64(&c.bits), bits: atomic.LoadUint64(&c.bits),
lvs: c.lvs.With(labelValues...), lvs: c.lvs.With(labelValues...),
} }
@ -95,6 +96,7 @@ func NewGauge(name string) *Gauge {
// With implements Gauge. // With implements Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge { func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{ return &Gauge{
Name: g.Name,
bits: atomic.LoadUint64(&g.bits), bits: atomic.LoadUint64(&g.bits),
lvs: g.lvs.With(labelValues...), lvs: g.lvs.With(labelValues...),
} }
@ -105,6 +107,20 @@ func (g *Gauge) Set(value float64) {
atomic.StoreUint64(&g.bits, math.Float64bits(value)) atomic.StoreUint64(&g.bits, math.Float64bits(value))
} }
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
for {
var (
old = atomic.LoadUint64(&g.bits)
newf = math.Float64frombits(old) + delta
new = math.Float64bits(newf)
)
if atomic.CompareAndSwapUint64(&g.bits, old, new) {
break
}
}
}
// Value returns the current value of the gauge. // Value returns the current value of the gauge.
func (g *Gauge) Value() float64 { func (g *Gauge) Value() float64 {
return math.Float64frombits(atomic.LoadUint64(&g.bits)) return math.Float64frombits(atomic.LoadUint64(&g.bits))
@ -121,7 +137,7 @@ func (g *Gauge) LabelValues() []string {
type Histogram struct { type Histogram struct {
Name string Name string
lvs lv.LabelValues lvs lv.LabelValues
h gohistogram.Histogram h *safeHistogram
} }
// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A // NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A
@ -129,25 +145,30 @@ type Histogram struct {
func NewHistogram(name string, buckets int) *Histogram { func NewHistogram(name string, buckets int) *Histogram {
return &Histogram{ return &Histogram{
Name: name, Name: name,
h: gohistogram.NewHistogram(buckets), h: &safeHistogram{Histogram: gohistogram.NewHistogram(buckets)},
} }
} }
// With implements Histogram. // With implements Histogram.
func (h *Histogram) With(labelValues ...string) metrics.Histogram { func (h *Histogram) With(labelValues ...string) metrics.Histogram {
return &Histogram{ return &Histogram{
lvs: h.lvs.With(labelValues...), Name: h.Name,
h: h.h, lvs: h.lvs.With(labelValues...),
h: h.h,
} }
} }
// Observe implements Histogram. // Observe implements Histogram.
func (h *Histogram) Observe(value float64) { func (h *Histogram) Observe(value float64) {
h.h.Lock()
defer h.h.Unlock()
h.h.Add(value) h.h.Add(value)
} }
// Quantile returns the value of the quantile q, 0.0 < q < 1.0. // Quantile returns the value of the quantile q, 0.0 < q < 1.0.
func (h *Histogram) Quantile(q float64) float64 { func (h *Histogram) Quantile(q float64) float64 {
h.h.RLock()
defer h.h.RUnlock()
return h.h.Quantile(q) return h.h.Quantile(q)
} }
@ -159,9 +180,17 @@ func (h *Histogram) LabelValues() []string {
// Print writes a string representation of the histogram to the passed writer. // Print writes a string representation of the histogram to the passed writer.
// Useful for printing to a terminal. // Useful for printing to a terminal.
func (h *Histogram) Print(w io.Writer) { func (h *Histogram) Print(w io.Writer) {
h.h.RLock()
defer h.h.RUnlock()
fmt.Fprintf(w, h.h.String()) fmt.Fprintf(w, h.h.String())
} }
// safeHistogram exists as gohistogram.Histogram is not goroutine-safe.
type safeHistogram struct {
sync.RWMutex
gohistogram.Histogram
}
// Bucket is a range in a histogram which aggregates observations. // Bucket is a range in a histogram which aggregates observations.
type Bucket struct { type Bucket struct {
From, To, Count int64 From, To, Count int64

View file

@ -66,6 +66,7 @@ func (in *Influx) NewGauge(name string) *Gauge {
return &Gauge{ return &Gauge{
name: name, name: name,
obs: in.gauges.Observe, obs: in.gauges.Observe,
add: in.gauges.Add,
} }
} }
@ -168,10 +169,14 @@ func mergeTags(tags map[string]string, labelValues []string) map[string]string {
if len(labelValues)%2 != 0 { if len(labelValues)%2 != 0 {
panic("mergeTags received a labelValues with an odd number of strings") panic("mergeTags received a labelValues with an odd number of strings")
} }
for i := 0; i < len(labelValues); i += 2 { ret := make(map[string]string, len(tags)+len(labelValues)/2)
tags[labelValues[i]] = labelValues[i+1] for k, v := range tags {
ret[k] = v
} }
return tags for i := 0; i < len(labelValues); i += 2 {
ret[labelValues[i]] = labelValues[i+1]
}
return ret
} }
func sum(a []float64) float64 { func sum(a []float64) float64 {
@ -216,6 +221,7 @@ type Gauge struct {
name string name string
lvs lv.LabelValues lvs lv.LabelValues
obs observeFunc obs observeFunc
add observeFunc
} }
// With implements metrics.Gauge. // With implements metrics.Gauge.
@ -224,6 +230,7 @@ func (g *Gauge) With(labelValues ...string) metrics.Gauge {
name: g.name, name: g.name,
lvs: g.lvs.With(labelValues...), lvs: g.lvs.With(labelValues...),
obs: g.obs, obs: g.obs,
add: g.add,
} }
} }
@ -232,6 +239,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value) g.obs(g.name, g.lvs, value)
} }
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.add(g.name, g.lvs, delta)
}
// Histogram is an Influx histrogram. Observations are aggregated into a // Histogram is an Influx histrogram. Observations are aggregated into a
// generic.Histogram and emitted as per-quantile gauges to the Influx server. // generic.Histogram and emitted as per-quantile gauges to the Influx server.
type Histogram struct { type Histogram struct {

View file

@ -21,6 +21,13 @@ func (s *Space) Observe(name string, lvs LabelValues, value float64) {
s.nodeFor(name).observe(lvs, value) s.nodeFor(name).observe(lvs, value)
} }
// Add locates the time series identified by the name and label values in
// the vector space, and appends the delta to the last value in the list of
// observations.
func (s *Space) Add(name string, lvs LabelValues, delta float64) {
s.nodeFor(name).add(lvs, delta)
}
// Walk traverses the vector space and invokes fn for each non-empty time series // Walk traverses the vector space and invokes fn for each non-empty time series
// which is encountered. Return false to abort the traversal. // which is encountered. Return false to abort the traversal.
func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) { func (s *Space) Walk(fn func(name string, lvs LabelValues, observations []float64) bool) {
@ -91,6 +98,34 @@ func (n *node) observe(lvs LabelValues, value float64) {
child.observe(tail, value) child.observe(tail, value)
} }
func (n *node) add(lvs LabelValues, delta float64) {
n.mtx.Lock()
defer n.mtx.Unlock()
if len(lvs) == 0 {
var value float64
if len(n.observations) > 0 {
value = last(n.observations) + delta
} else {
value = delta
}
n.observations = append(n.observations, value)
return
}
if len(lvs) < 2 {
panic("too few LabelValues; programmer error!")
}
head, tail := pair{lvs[0], lvs[1]}, lvs[2:]
if n.children == nil {
n.children = map[pair]*node{}
}
child, ok := n.children[head]
if !ok {
child = &node{}
n.children[head] = child
}
child.add(tail, delta)
}
func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool { func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool {
n.mtx.RLock() n.mtx.RLock()
defer n.mtx.RUnlock() defer n.mtx.RUnlock()
@ -104,3 +139,7 @@ func (n *node) walk(lvs LabelValues, fn func(LabelValues, []float64) bool) bool
} }
return true return true
} }
func last(a []float64) float64 {
return a[len(a)-1]
}

View file

@ -12,6 +12,7 @@ type Counter interface {
type Gauge interface { type Gauge interface {
With(labelValues ...string) Gauge With(labelValues ...string) Gauge
Set(value float64) Set(value float64)
Add(delta float64)
} }
// Histogram describes a metric that takes repeated observations of the same // Histogram describes a metric that takes repeated observations of the same

View file

@ -54,6 +54,13 @@ func (g Gauge) With(labelValues ...string) metrics.Gauge {
return next return next
} }
// Add implements metrics.Gauge.
func (g Gauge) Add(delta float64) {
for _, gauge := range g {
gauge.Add(delta)
}
}
// Histogram collects multiple individual histograms and treats them as a unit. // Histogram collects multiple individual histograms and treats them as a unit.
type Histogram []metrics.Histogram type Histogram []metrics.Histogram

View file

@ -74,6 +74,7 @@ func (s *Statsd) NewGauge(name string) *Gauge {
return &Gauge{ return &Gauge{
name: s.prefix + name, name: s.prefix + name,
obs: s.gauges.Observe, obs: s.gauges.Observe,
add: s.gauges.Add,
} }
} }
@ -201,6 +202,7 @@ func (c *Counter) Add(delta float64) {
type Gauge struct { type Gauge struct {
name string name string
obs observeFunc obs observeFunc
add observeFunc
} }
// With is a no-op. // With is a no-op.
@ -213,6 +215,11 @@ func (g *Gauge) Set(value float64) {
g.obs(g.name, lv.LabelValues{}, value) g.obs(g.name, lv.LabelValues{}, value)
} }
// Add implements metrics.Gauge.
func (g *Gauge) Add(delta float64) {
g.add(g.name, lv.LabelValues{}, delta)
}
// Timing is a StatsD timing, or metrics.Histogram. Observations are // Timing is a StatsD timing, or metrics.Histogram. Observations are
// forwarded to a Statsd object, and collected (but not aggregated) per // forwarded to a Statsd object, and collected (but not aggregated) per
// timeseries. // timeseries.

View file

@ -7,6 +7,7 @@ import "time"
type Timer struct { type Timer struct {
h Histogram h Histogram
t time.Time t time.Time
u time.Duration
} }
// NewTimer wraps the given histogram and records the current time. // NewTimer wraps the given histogram and records the current time.
@ -14,15 +15,22 @@ func NewTimer(h Histogram) *Timer {
return &Timer{ return &Timer{
h: h, h: h,
t: time.Now(), t: time.Now(),
u: time.Second,
} }
} }
// ObserveDuration captures the number of seconds since the timer was // ObserveDuration captures the number of seconds since the timer was
// constructed, and forwards that observation to the histogram. // constructed, and forwards that observation to the histogram.
func (t *Timer) ObserveDuration() { func (t *Timer) ObserveDuration() {
d := time.Since(t.t).Seconds() d := float64(time.Since(t.t).Nanoseconds()) / float64(t.u)
if d < 0 { if d < 0 {
d = 0 d = 0
} }
t.h.Observe(d) t.h.Observe(d)
} }
// Unit sets the unit of the float64 emitted by the timer.
// By default, the timer emits seconds.
func (t *Timer) Unit(u time.Duration) {
t.u = u
}