DataDog and StatsD Metrics Support

* Added support for DataDog and StatsD monitoring
* Added documentation
This commit is contained in:
Alex Antonov 2017-07-20 17:26:43 -05:00 committed by Ludovic Fernandez
parent cd28e7b24f
commit 69c628b626
39 changed files with 3921 additions and 13 deletions


@ -648,6 +648,16 @@ address = ":8080"
# [web.metrics.prometheus]
# Buckets=[0.1,0.3,1.2,5.0]
#
# To enable Traefik to export internal metrics to DataDog
# [web.metrics.datadog]
# Address = "localhost:8125"
# PushInterval = "10s"
#
# To enable Traefik to export internal metrics to StatsD
# [web.metrics.statsd]
# Address = "localhost:8125"
# PushInterval = "10s"
#
# To enable basic auth on the webui
# with 2 user/pass: test:test and test2:test2
# Passwords can be encoded in MD5, SHA1 and BCrypt: you can use htpasswd to generate those ones

22
glide.lock generated

@ -1,5 +1,5 @@
hash: 6aff4c6177ddc3247530d141a93f5bb044ee72acaa63b5667ceb205828c8ad03
updated: 2017-07-11T23:50:31.241672481+02:00
hash: 4d24f4a986de7e07c32b63abc3c8bf365d205df0a6f65ba4a6ca3d7ac7ae2256
updated: 2017-07-20T23:54:09.638352893+02:00
imports:
- name: cloud.google.com/go
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
@ -82,8 +82,6 @@ imports:
version: 9208b142303c12d8899bae836fd524ac9338b4fd
- name: github.com/codegangsta/cli
version: bf4a526f48af7badd25d2cb02d587e1b01be3b50
- name: github.com/urfave/negroni
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
- name: github.com/containous/flaeg
version: b5d2dc5878df07c2d74413348186982e7b865871
- name: github.com/containous/mux
@ -242,9 +240,17 @@ imports:
- name: github.com/go-kit/kit
version: f66b0e13579bfc5a48b9e2a94b1209c107ea1f41
subpackages:
- log
- metrics
- metrics/dogstatsd
- metrics/internal/lv
- metrics/internal/ratemap
- metrics/multi
- metrics/prometheus
- metrics/statsd
- util/conn
- name: github.com/go-logfmt/logfmt
version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5
- name: github.com/go-openapi/jsonpointer
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
- name: github.com/go-openapi/jsonreference
@ -253,6 +259,8 @@ imports:
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- name: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
- name: github.com/go-stack/stack
version: 54be5f394ed2c3e19dac9134a40a95ba5a017f7b
- name: github.com/gogo/protobuf
version: 909568be09de550ed094403c2bf8a261b5bb730a
subpackages:
@ -302,6 +310,8 @@ imports:
version: 72f9bd7c4e0c2a40055ab3d0f09654f730cce982
- name: github.com/juju/ratelimit
version: 77ed1c8a01217656d2080ad51981f6e99adaa177
- name: github.com/kr/logfmt
version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0
- name: github.com/mailgun/timetools
version: 7e6055773c5137efbeb3bd2410d705fe10ab6bfd
- name: github.com/mailru/easyjson
@ -436,6 +446,8 @@ imports:
- assert
- mock
- require
- name: github.com/stvp/go-udp-testing
version: 06eb4f886d9f8242b0c176cf0d3ce5ec2cedda05
- name: github.com/thoas/stats
version: 152b5d051953fdb6e45f14b6826962aadc032324
- name: github.com/timewasted/linode
@ -452,6 +464,8 @@ imports:
version: 50716a0a853771bb36bfce61a45cdefdb98c2e6e
- name: github.com/unrolled/secure
version: 824e85271811af89640ea25620c67f6c2eed987e
- name: github.com/urfave/negroni
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
- name: github.com/vulcand/oxy
version: 7da864c1d53bd58165435bb78bbf8c01f01c8f4a
repo: https://github.com/containous/oxy.git


@ -22,7 +22,7 @@ import:
- roundrobin
- stream
- utils
- name: github.com/urfave/negroni
- package: github.com/urfave/negroni
version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9
- package: github.com/containous/staert
version: 1e26a71803e428fd933f5f9c8e50a26878f53147
@ -99,7 +99,13 @@ import:
- package: github.com/go-kit/kit
version: v0.3.0
subpackages:
- log
- metrics
- metrics/dogstatsd
- metrics/multi
- metrics/prometheus
- metrics/statsd
- util/conn
- package: github.com/prometheus/client_golang
version: 08fd2e12372a66e68e30523c7642e0cbc3e4fbde
subpackages:
@ -190,6 +196,7 @@ import:
subpackages:
- spew
testImport:
- package: github.com/stvp/go-udp-testing
- package: github.com/docker/libcompose
version: 0ad950cbeb3d72107613dd220b5e9d7e001b890b
- package: github.com/go-check/check

92
middlewares/datadog.go Normal file

@ -0,0 +1,92 @@
package middlewares
import (
"time"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
kitlog "github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/dogstatsd"
)
var _ Metrics = (*Datadog)(nil)
var datadogClient = dogstatsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...interface{}) error {
log.Info(keyvals)
return nil
}))
var datadogTicker *time.Ticker
// Metric names consistent with https://github.com/DataDog/integrations-extras/pull/64
const (
ddMetricsReqsName = "requests.total"
ddMetricsLatencyName = "request.duration"
)
// Datadog is an implementation of Metrics that exposes Datadog metrics for:
// - number of requests partitioned by status code and method
// - request durations
// - number of retries performed
type Datadog struct {
reqsCounter metrics.Counter
reqDurationHistogram metrics.Histogram
retryCounter metrics.Counter
}
func (dd *Datadog) getReqsCounter() metrics.Counter {
return dd.reqsCounter
}
func (dd *Datadog) getReqDurationHistogram() metrics.Histogram {
return dd.reqDurationHistogram
}
func (dd *Datadog) getRetryCounter() metrics.Counter {
return dd.retryCounter
}
// NewDataDog creates a new instance of Datadog
func NewDataDog(name string) *Datadog {
var m Datadog
m.reqsCounter = datadogClient.NewCounter(ddMetricsReqsName, 1.0).With("service", name)
m.reqDurationHistogram = datadogClient.NewHistogram(ddMetricsLatencyName, 1.0).With("service", name)
return &m
}
// InitDatadogClient initializes metrics pusher and creates a datadogClient if not created already
func InitDatadogClient(config *types.Datadog) *time.Ticker {
if datadogTicker == nil {
address := config.Address
if len(address) == 0 {
address = "localhost:8125"
}
pushInterval, err := time.ParseDuration(config.PushInterval)
if err != nil {
log.Warnf("Unable to parse %s into pushInterval, using 10s as default value", config.PushInterval)
pushInterval = 10 * time.Second
}
report := time.NewTicker(pushInterval)
safe.Go(func() {
datadogClient.SendLoop(report.C, "udp", address)
})
datadogTicker = report
}
return datadogTicker
}
// StopDatadogClient stops internal datadogTicker which controls the pushing of metrics to DD Agent and resets it to `nil`
func StopDatadogClient() {
if datadogTicker != nil {
datadogTicker.Stop()
}
datadogTicker = nil
}
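
To make the lifecycle of these helpers concrete, here is a minimal usage sketch that wires the exported functions above into a plain negroni stack outside Traefik's own server code; the service name "backend1", the agent address, and the ":8080" listen address are illustrative assumptions, not part of this commit.

package main

import (
	"net/http"

	"github.com/containous/traefik/middlewares"
	"github.com/containous/traefik/types"
	"github.com/urfave/negroni"
)

func main() {
	// Start the background pusher towards the (assumed) local DogStatsD agent.
	middlewares.InitDatadogClient(&types.Datadog{Address: "localhost:8125", PushInterval: "10s"})
	defer middlewares.StopDatadogClient()

	// Wrap a plain handler chain with the Datadog metrics middleware.
	dd := middlewares.NewDataDog("backend1")
	n := negroni.New(middlewares.NewMetricsWrapper(dd))
	n.UseHandler(http.NewServeMux())
	http.ListenAndServe(":8080", n)
}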


@ -0,0 +1,53 @@
package middlewares
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/stvp/go-udp-testing"
"github.com/urfave/negroni"
)
func TestDatadog(t *testing.T) {
udp.SetAddr(":18125")
// This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond
udp.Timeout = 5 * time.Second
recorder := httptest.NewRecorder()
InitDatadogClient(&types.Datadog{":18125", "1s"})
n := negroni.New()
dd := NewDataDog("test")
defer StopDatadogClient()
metricsMiddlewareBackend := NewMetricsWrapper(dd)
n.Use(metricsMiddlewareBackend)
r := http.NewServeMux()
r.HandleFunc(`/ok`, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "ok")
})
r.HandleFunc(`/not-found`, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintln(w, "not-found")
})
n.UseHandler(r)
req1 := testhelpers.MustNewRequest(http.MethodGet, "http://localhost:3000/ok", nil)
req2 := testhelpers.MustNewRequest(http.MethodGet, "http://localhost:3000/not-found", nil)
expected := []string{
// We are only validating counts, as it is nearly impossible to validate latency, since it varies every run
"traefik.requests.total:1.000000|c|#service:test,code:404,method:GET\n",
"traefik.requests.total:1.000000|c|#service:test,code:200,method:GET\n",
}
udp.ShouldReceiveAll(t, expected, func() {
n.ServeHTTP(recorder, req1)
n.ServeHTTP(recorder, req2)
})
}


@ -6,6 +6,7 @@ import (
"time"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/multi"
)
// Metrics is an Interface that must be satisfied by any system that
@ -22,6 +23,52 @@ type RetryMetrics interface {
getRetryCounter() metrics.Counter
}
// MultiMetrics is a wrapper that dispatches every observation to multiple configured Metrics implementations
type MultiMetrics struct {
wrappedMetrics *[]Metrics
reqsCounter metrics.Counter
reqDurationHistogram metrics.Histogram
retryCounter metrics.Counter
}
// NewMultiMetrics creates a new instance of MultiMetrics
func NewMultiMetrics(manyMetrics []Metrics) *MultiMetrics {
counters := []metrics.Counter{}
histograms := []metrics.Histogram{}
retryCounters := []metrics.Counter{}
for _, m := range manyMetrics {
counters = append(counters, m.getReqsCounter())
histograms = append(histograms, m.getReqDurationHistogram())
retryCounters = append(retryCounters, m.getRetryCounter())
}
var mm MultiMetrics
mm.wrappedMetrics = &manyMetrics
mm.reqsCounter = multi.NewCounter(counters...)
mm.reqDurationHistogram = multi.NewHistogram(histograms...)
mm.retryCounter = multi.NewCounter(retryCounters...)
return &mm
}
func (mm *MultiMetrics) getReqsCounter() metrics.Counter {
return mm.reqsCounter
}
func (mm *MultiMetrics) getReqDurationHistogram() metrics.Histogram {
return mm.reqDurationHistogram
}
func (mm *MultiMetrics) getRetryCounter() metrics.Counter {
return mm.retryCounter
}
func (mm *MultiMetrics) getWrappedMetrics() *[]Metrics {
return mm.wrappedMetrics
}
// MetricsWrapper is a Negroni compatible Handler which relies on a
// given Metrics implementation to expose and monitor Traefik Metrics.
type MetricsWrapper struct {

85
middlewares/statsd.go Normal file

@ -0,0 +1,85 @@
package middlewares
import (
"time"
"github.com/containous/traefik/log"
"github.com/containous/traefik/safe"
"github.com/containous/traefik/types"
kitlog "github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/statsd"
)
var _ Metrics = (*Statsd)(nil)
var statsdClient = statsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...interface{}) error {
log.Info(keyvals)
return nil
}))
var statsdTicker *time.Ticker
// Statsd is an implementation of Metrics that exposes StatsD metrics for:
// - number of requests partitioned by status code and method
// - request durations
// - number of retries performed
type Statsd struct {
reqsCounter metrics.Counter
reqDurationHistogram metrics.Histogram
retryCounter metrics.Counter
}
func (s *Statsd) getReqsCounter() metrics.Counter {
return s.reqsCounter
}
func (s *Statsd) getReqDurationHistogram() metrics.Histogram {
return s.reqDurationHistogram
}
func (s *Statsd) getRetryCounter() metrics.Counter {
return s.retryCounter
}
// NewStatsD creates a new instance of Statsd
func NewStatsD(name string) *Statsd {
var m Statsd
m.reqsCounter = statsdClient.NewCounter(ddMetricsReqsName, 1.0).With("service", name)
m.reqDurationHistogram = statsdClient.NewTiming(ddMetricsLatencyName, 1.0).With("service", name)
return &m
}
// InitStatsdClient initializes metrics pusher and creates a statsdClient if not created already
func InitStatsdClient(config *types.Statsd) *time.Ticker {
if statsdTicker == nil {
address := config.Address
if len(address) == 0 {
address = "localhost:8125"
}
pushInterval, err := time.ParseDuration(config.PushInterval)
if err != nil {
log.Warnf("Unable to parse %s into pushInterval, using 10s as default value", config.PushInterval)
pushInterval = 10 * time.Second
}
report := time.NewTicker(pushInterval)
safe.Go(func() {
statsdClient.SendLoop(report.C, "udp", address)
})
statsdTicker = report
}
return statsdTicker
}
// StopStatsdClient stops internal statsdTicker which controls the pushing of metrics to StatsD Agent and resets it to `nil`
func StopStatsdClient() {
if statsdTicker != nil {
statsdTicker.Stop()
}
statsdTicker = nil
}
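
The StatsD client follows the same init/stop lifecycle as the Datadog one. Below is a small, hypothetical sketch of combining both middlewares through the MultiMetrics wrapper from middlewares/metrics.go; the service name "backend1" is an assumption.

package main

import (
	"github.com/containous/traefik/middlewares"
)

func main() {
	// Assumes InitDatadogClient and InitStatsdClient have already been called,
	// so that buffered observations are actually pushed to the agents.
	mm := middlewares.NewMultiMetrics([]middlewares.Metrics{
		middlewares.NewDataDog("backend1"),
		middlewares.NewStatsD("backend1"),
	})

	// Each request handled by this wrapper increments both counters and
	// records its duration in both histograms, via go-kit's multi package.
	_ = middlewares.NewMetricsWrapper(mm)
}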


@ -0,0 +1,52 @@
package middlewares
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/containous/traefik/testhelpers"
"github.com/containous/traefik/types"
"github.com/stvp/go-udp-testing"
"github.com/urfave/negroni"
)
func TestStatsD(t *testing.T) {
udp.SetAddr(":18125")
// This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond
udp.Timeout = 5 * time.Second
recorder := httptest.NewRecorder()
InitStatsdClient(&types.Statsd{":18125", "1s"})
n := negroni.New()
c := NewStatsD("test")
defer StopStatsdClient()
metricsMiddlewareBackend := NewMetricsWrapper(c)
n.Use(metricsMiddlewareBackend)
r := http.NewServeMux()
r.HandleFunc(`/ok`, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintln(w, "ok")
})
r.HandleFunc(`/not-found`, func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusNotFound)
fmt.Fprintln(w, "not-found")
})
n.UseHandler(r)
req1 := testhelpers.MustNewRequest(http.MethodGet, "http://localhost:3000/ok", nil)
req2 := testhelpers.MustNewRequest(http.MethodGet, "http://localhost:3000/not-found", nil)
expected := []string{
// We are only validating counts, as it is nearly impossible to validate latency, since it varies every run
"traefik.requests.total:2.000000|c\n",
}
udp.ShouldReceiveAll(t, expected, func() {
n.ServeHTTP(recorder, req1)
n.ServeHTTP(recorder, req2)
})
}


@ -437,6 +437,14 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
Prometheus: &types.Prometheus{
Buckets: types.Buckets{0.1, 0.3, 1.2, 5},
},
Datadog: &types.Datadog{
Address: "localhost:8125",
PushInterval: "10s",
},
StatsD: &types.Statsd{
Address: "localhost:8125",
PushInterval: "10s",
},
}
// default Marathon


@ -157,6 +157,7 @@ func (server *Server) Close() {
os.Exit(1)
}
}(ctx)
stopMetricsClients(server.globalConfiguration)
server.stopLeadership()
server.routinesPool.Cleanup()
close(server.configurationChan)
@ -198,6 +199,7 @@ func (server *Server) setupServerEntryPoint(newServerEntryPointName string, newS
if server.accessLoggerMiddleware != nil {
serverMiddlewares = append(serverMiddlewares, server.accessLoggerMiddleware)
}
initializeMetricsClients(server.globalConfiguration)
metrics := newMetrics(server.globalConfiguration, newServerEntryPointName)
if metrics != nil {
serverMiddlewares = append(serverMiddlewares, middlewares.NewMetricsWrapper(metrics))
@ -1060,18 +1062,52 @@ func (*Server) configureBackends(backends map[string]*types.Backend) {
// Note that given there is no metrics instrumentation configured, it will return nil.
func newMetrics(globalConfig GlobalConfiguration, name string) middlewares.Metrics {
metricsEnabled := globalConfig.Web != nil && globalConfig.Web.Metrics != nil
if metricsEnabled && globalConfig.Web.Metrics.Prometheus != nil {
metrics, _, err := middlewares.NewPrometheus(name, globalConfig.Web.Metrics.Prometheus)
if err != nil {
log.Errorf("Error creating Prometheus Metrics implementation: %s", err)
return nil
if metricsEnabled {
// Create MultiMetric
metrics := []middlewares.Metrics{}
if globalConfig.Web.Metrics.Prometheus != nil {
metric, _, err := middlewares.NewPrometheus(name, globalConfig.Web.Metrics.Prometheus)
if err != nil {
log.Errorf("Error creating Prometheus metrics implementation: %s", err)
}
log.Debug("Configured Prometheus metrics")
metrics = append(metrics, metric)
}
return metrics
if globalConfig.Web.Metrics.Datadog != nil {
metric := middlewares.NewDataDog(name)
log.Debugf("Configured DataDog metrics pushing to %s once every %s", globalConfig.Web.Metrics.Datadog.Address, globalConfig.Web.Metrics.Datadog.PushInterval)
metrics = append(metrics, metric)
}
if globalConfig.Web.Metrics.StatsD != nil {
metric := middlewares.NewStatsD(name)
log.Debugf("Configured StatsD metrics pushing to %s once every %s", globalConfig.Web.Metrics.StatsD.Address, globalConfig.Web.Metrics.StatsD.PushInterval)
metrics = append(metrics, metric)
}
return middlewares.NewMultiMetrics(metrics)
}
return nil
}
func initializeMetricsClients(globalConfig GlobalConfiguration) {
metricsEnabled := globalConfig.Web != nil && globalConfig.Web.Metrics != nil
if metricsEnabled {
if globalConfig.Web.Metrics.Datadog != nil {
middlewares.InitDatadogClient(globalConfig.Web.Metrics.Datadog)
}
if globalConfig.Web.Metrics.StatsD != nil {
middlewares.InitStatsdClient(globalConfig.Web.Metrics.StatsD)
}
}
}
func stopMetricsClients(globalConfig GlobalConfiguration) {
middlewares.StopDatadogClient()
middlewares.StopStatsdClient()
}
func registerRetryMiddleware(
httpHandler http.Handler,
globalConfig GlobalConfiguration,


@ -443,8 +443,8 @@ func TestNewMetrics(t *testing.T) {
metricsImpl := newMetrics(tc.globalConfig, "test1")
if metricsImpl != nil {
if _, ok := metricsImpl.(*middlewares.Prometheus); !ok {
t.Errorf("invalid metricsImpl type, got %T want %T", metricsImpl, &middlewares.Prometheus{})
if _, ok := metricsImpl.(*middlewares.MultiMetrics); !ok {
t.Errorf("invalid metricsImpl type, got %T want %T", metricsImpl, &middlewares.MultiMetrics{})
}
}
})


@ -327,6 +327,8 @@ type Statistics struct {
// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems
type Metrics struct {
Prometheus *Prometheus `description:"Prometheus metrics exporter type"`
Datadog *Datadog `description:"DataDog metrics exporter type"`
StatsD *Statsd `description:"StatsD metrics exporter type"`
}
// Prometheus can contain specific configuration used by the Prometheus Metrics exporter
@ -334,6 +336,18 @@ type Prometheus struct {
Buckets Buckets `description:"Buckets for latency metrics"`
}
// Datadog contains address and metrics pushing interval configuration
type Datadog struct {
Address string `description:"DataDog's Dogstatsd address"`
PushInterval string `description:"DataDog push interval"`
}
// Statsd contains address and metrics pushing interval configuration
type Statsd struct {
Address string `description:"StatsD address"`
PushInterval string `description:"StatsD push interval"`
}
// Buckets holds Prometheus Buckets
type Buckets []float64
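
As a reference point, here is a short sketch of a fully populated types.Metrics value, mirroring the defaults registered in NewTraefikDefaultPointersConfiguration earlier in this diff; the snippet is illustrative only.

package main

import (
	"fmt"

	"github.com/containous/traefik/types"
)

func main() {
	// Values mirror the defaults set in the TraefikConfiguration diff above.
	m := &types.Metrics{
		Prometheus: &types.Prometheus{Buckets: types.Buckets{0.1, 0.3, 1.2, 5}},
		Datadog:    &types.Datadog{Address: "localhost:8125", PushInterval: "10s"},
		StatsD:     &types.Statsd{Address: "localhost:8125", PushInterval: "10s"},
	}
	fmt.Printf("%+v\n", m.Datadog)
}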

93
vendor/github.com/go-kit/kit/log/doc.go generated vendored Normal file

@ -0,0 +1,93 @@
// Package log provides a structured logger.
//
// Structured logging produces logs easily consumed later by humans or
// machines. Humans might be interested in debugging errors, or tracing
// specific requests. Machines might be interested in counting interesting
// events, or aggregating information for off-line processing. In both cases,
// it is important that the log messages are structured and actionable.
// Package log is designed to encourage both of these best practices.
//
// Basic Usage
//
// The fundamental interface is Logger. Loggers create log events from
// key/value data. The Logger interface has a single method, Log, which
// accepts a sequence of alternating key/value pairs, which this package names
// keyvals.
//
// type Logger interface {
// Log(keyvals ...interface{}) error
// }
//
// Here is an example of a function using a Logger to create log events.
//
// func RunTask(task Task, logger log.Logger) string {
// logger.Log("taskID", task.ID, "event", "starting task")
// ...
// logger.Log("taskID", task.ID, "event", "task complete")
// }
//
// The keys in the above example are "taskID" and "event". The values are
// task.ID, "starting task", and "task complete". Every key is followed
// immediately by its value.
//
// Keys are usually plain strings. Values may be any type that has a sensible
// encoding in the chosen log format. With structured logging it is a good
// idea to log simple values without formatting them. This practice allows
// the chosen logger to encode values in the most appropriate way.
//
// Log Context
//
// A log context stores keyvals that it includes in all log events. Building
// appropriate log contexts reduces repetition and aids consistency in the
// resulting log output. We can use a context to improve the RunTask example.
//
// func RunTask(task Task, logger log.Logger) string {
// logger = log.NewContext(logger).With("taskID", task.ID)
// logger.Log("event", "starting task")
// ...
// taskHelper(task.Cmd, logger)
// ...
// logger.Log("event", "task complete")
// }
//
// The improved version emits the same log events as the original for the
// first and last calls to Log. The call to taskHelper highlights that a
// context may be passed as a logger to other functions. Each log event
// created by the called function will include the task.ID even though the
// function does not have access to that value. Using log contexts this way
// simplifies producing log output that enables tracing the life cycle of
// individual tasks. (See the Context example for the full code of the
// above snippet.)
//
// Dynamic Context Values
//
// A Valuer function stored in a log context generates a new value each time
// the context logs an event. The Valuer example demonstrates how this
// feature works.
//
// Valuers provide the basis for consistently logging timestamps and source
// code location. The log package defines several valuers for that purpose.
// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and
// DefaultCaller. A common logger initialization sequence that ensures all log
// entries contain a timestamp and source location looks like this:
//
// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout))
// logger = log.NewContext(logger).With("ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
//
// Concurrent Safety
//
// Applications with multiple goroutines want each log event written to the
// same logger to remain separate from other log events. Package log provides
// two simple solutions for concurrent safe logging.
//
// NewSyncWriter wraps an io.Writer and serializes each call to its Write
// method. Using a SyncWriter has the benefit that the smallest practical
// portion of the logging logic is performed within a mutex, but it requires
// the formatting Logger to make only one call to Write per log event.
//
// NewSyncLogger wraps any Logger and serializes each call to its Log method.
// Using a SyncLogger has the benefit that it guarantees each log event is
// handled atomically within the wrapped logger, but it typically serializes
// both the formatting and output logic. Use a SyncLogger if the formatting
// logger may perform multiple writes per log event.
package log

92
vendor/github.com/go-kit/kit/log/json_logger.go generated vendored Normal file

@ -0,0 +1,92 @@
package log
import (
"encoding"
"encoding/json"
"fmt"
"io"
"reflect"
)
type jsonLogger struct {
io.Writer
}
// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a
// single JSON object. Each log event produces no more than one call to
// w.Write. The passed Writer must be safe for concurrent use by multiple
// goroutines if the returned Logger will be used concurrently.
func NewJSONLogger(w io.Writer) Logger {
return &jsonLogger{w}
}
func (l *jsonLogger) Log(keyvals ...interface{}) error {
n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd
m := make(map[string]interface{}, n)
for i := 0; i < len(keyvals); i += 2 {
k := keyvals[i]
var v interface{} = ErrMissingValue
if i+1 < len(keyvals) {
v = keyvals[i+1]
}
merge(m, k, v)
}
return json.NewEncoder(l.Writer).Encode(m)
}
func merge(dst map[string]interface{}, k, v interface{}) {
var key string
switch x := k.(type) {
case string:
key = x
case fmt.Stringer:
key = safeString(x)
default:
key = fmt.Sprint(x)
}
if x, ok := v.(error); ok {
v = safeError(x)
}
// We want json.Marshaler and encoding.TextMarshaler to take priority over
// err.Error() and v.String(). But json.Marshal (called later) does that by
// default so we force a no-op if it's one of those 2 cases.
switch x := v.(type) {
case json.Marshaler:
case encoding.TextMarshaler:
case error:
v = safeError(x)
case fmt.Stringer:
v = safeString(x)
}
dst[key] = v
}
func safeString(str fmt.Stringer) (s string) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s = "NULL"
} else {
panic(panicVal)
}
}
}()
s = str.String()
return
}
func safeError(err error) (s interface{}) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s = nil
} else {
panic(panicVal)
}
}
}()
s = err.Error()
return
}

144
vendor/github.com/go-kit/kit/log/log.go generated vendored Normal file

@ -0,0 +1,144 @@
package log
import "errors"
// Logger is the fundamental interface for all log operations. Log creates a
// log event from keyvals, a variadic sequence of alternating keys and values.
// Implementations must be safe for concurrent use by multiple goroutines. In
// particular, any implementation of Logger that appends to keyvals or
// modifies any of its elements must make a copy first.
type Logger interface {
Log(keyvals ...interface{}) error
}
// ErrMissingValue is appended to keyvals slices with odd length to substitute
// the missing value.
var ErrMissingValue = errors.New("(MISSING)")
// NewContext returns a new Context that logs to logger.
func NewContext(logger Logger) *Context {
if c, ok := logger.(*Context); ok {
return c
}
return &Context{logger: logger}
}
// Context must always have the same number of stack frames between calls to
// its Log method and the eventual binding of Valuers to their value. This
// requirement comes from the functional requirement to allow a context to
// resolve application call site information for a log.Caller stored in the
// context. To do this we must be able to predict the number of logging
// functions on the stack when bindValues is called.
//
// Three implementation details provide the needed stack depth consistency.
// The first two of these details also result in better amortized performance,
// and thus make sense even without the requirements regarding stack depth.
// The third detail, however, is subtle and tied to the implementation of the
// Go compiler.
//
// 1. NewContext avoids introducing an additional layer when asked to
// wrap another Context.
// 2. With avoids introducing an additional layer by returning a newly
// constructed Context with a merged keyvals rather than simply
// wrapping the existing Context.
// 3. All of Context's methods take pointer receivers even though they
// do not mutate the Context.
//
// Before explaining the last detail, first some background. The Go compiler
// generates wrapper methods to implement the auto dereferencing behavior when
// calling a value method through a pointer variable. These wrapper methods
// are also used when calling a value method through an interface variable
// because interfaces store a pointer to the underlying concrete value.
// Calling a pointer receiver through an interface does not require generating
// an additional function.
//
// If Context had value methods then calling Context.Log through a variable
// with type Logger would have an extra stack frame compared to calling
// Context.Log through a variable with type Context. Using pointer receivers
// avoids this problem.
// A Context wraps a Logger and holds keyvals that it includes in all log
// events. When logging, a Context replaces all value elements (odd indexes)
// containing a Valuer with their generated value for each call to its Log
// method.
type Context struct {
logger Logger
keyvals []interface{}
hasValuer bool
}
// Log replaces all value elements (odd indexes) containing a Valuer in the
// stored context with their generated value, appends keyvals, and passes the
// result to the wrapped Logger.
func (l *Context) Log(keyvals ...interface{}) error {
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
if l.hasValuer {
// If no keyvals were appended above then we must copy l.keyvals so
// that future log events will reevaluate the stored Valuers.
if len(keyvals) == 0 {
kvs = append([]interface{}{}, l.keyvals...)
}
bindValues(kvs[:len(l.keyvals)])
}
return l.logger.Log(kvs...)
}
// With returns a new Context with keyvals appended to those of the receiver.
func (l *Context) With(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
kvs := append(l.keyvals, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
return &Context{
logger: l.logger,
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
keyvals: kvs[:len(kvs):len(kvs)],
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// WithPrefix returns a new Context with keyvals prepended to those of the
// receiver.
func (l *Context) WithPrefix(keyvals ...interface{}) *Context {
if len(keyvals) == 0 {
return l
}
// Limiting the capacity of the stored keyvals ensures that a new
// backing array is created if the slice must grow in Log or With.
// Using the extra capacity without copying risks a data race that
// would violate the Logger interface contract.
n := len(l.keyvals) + len(keyvals)
if len(keyvals)%2 != 0 {
n++
}
kvs := make([]interface{}, 0, n)
kvs = append(kvs, keyvals...)
if len(kvs)%2 != 0 {
kvs = append(kvs, ErrMissingValue)
}
kvs = append(kvs, l.keyvals...)
return &Context{
logger: l.logger,
keyvals: kvs,
hasValuer: l.hasValuer || containsValuer(keyvals),
}
}
// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
// object that calls f.
type LoggerFunc func(...interface{}) error
// Log implements Logger by calling f(keyvals...).
func (f LoggerFunc) Log(keyvals ...interface{}) error {
return f(keyvals...)
}

62
vendor/github.com/go-kit/kit/log/logfmt_logger.go generated vendored Normal file

@ -0,0 +1,62 @@
package log
import (
"bytes"
"io"
"sync"
"github.com/go-logfmt/logfmt"
)
type logfmtEncoder struct {
*logfmt.Encoder
buf bytes.Buffer
}
func (l *logfmtEncoder) Reset() {
l.Encoder.Reset()
l.buf.Reset()
}
var logfmtEncoderPool = sync.Pool{
New: func() interface{} {
var enc logfmtEncoder
enc.Encoder = logfmt.NewEncoder(&enc.buf)
return &enc
},
}
type logfmtLogger struct {
w io.Writer
}
// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in
// logfmt format. Each log event produces no more than one call to w.Write.
// The passed Writer must be safe for concurrent use by multiple goroutines if
// the returned Logger will be used concurrently.
func NewLogfmtLogger(w io.Writer) Logger {
return &logfmtLogger{w}
}
func (l logfmtLogger) Log(keyvals ...interface{}) error {
enc := logfmtEncoderPool.Get().(*logfmtEncoder)
enc.Reset()
defer logfmtEncoderPool.Put(enc)
if err := enc.EncodeKeyvals(keyvals...); err != nil {
return err
}
// Add newline to the end of the buffer
if err := enc.EndRecord(); err != nil {
return err
}
// The Logger interface requires implementations to be safe for concurrent
// use by multiple goroutines. For this implementation that means making
// only one call to l.w.Write() for each call to Log.
if _, err := l.w.Write(enc.buf.Bytes()); err != nil {
return err
}
return nil
}

8
vendor/github.com/go-kit/kit/log/nop_logger.go generated vendored Normal file

@ -0,0 +1,8 @@
package log
type nopLogger struct{}
// NewNopLogger returns a logger that doesn't do anything.
func NewNopLogger() Logger { return nopLogger{} }
func (nopLogger) Log(...interface{}) error { return nil }

116
vendor/github.com/go-kit/kit/log/stdlib.go generated vendored Normal file

@ -0,0 +1,116 @@
package log
import (
"io"
"log"
"regexp"
"strings"
)
// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's
// designed to be passed to a Go kit logger as the writer, for cases where
// it's necessary to redirect all Go kit log output to the stdlib logger.
//
// If you have any choice in the matter, you shouldn't use this. Prefer to
// redirect the stdlib log to the Go kit logger via NewStdlibAdapter.
type StdlibWriter struct{}
// Write implements io.Writer.
func (w StdlibWriter) Write(p []byte) (int, error) {
log.Print(strings.TrimSpace(string(p)))
return len(p), nil
}
// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
// logger's SetOutput. It will extract date/timestamps, filenames, and
// messages, and place them under relevant keys.
type StdlibAdapter struct {
Logger
timestampKey string
fileKey string
messageKey string
}
// StdlibAdapterOption sets a parameter for the StdlibAdapter.
type StdlibAdapterOption func(*StdlibAdapter)
// TimestampKey sets the key for the timestamp field. By default, it's "ts".
func TimestampKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.timestampKey = key }
}
// FileKey sets the key for the file and line field. By default, it's "file".
func FileKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.fileKey = key }
}
// MessageKey sets the key for the actual log message. By default, it's "msg".
func MessageKey(key string) StdlibAdapterOption {
return func(a *StdlibAdapter) { a.messageKey = key }
}
// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
// logger. It's designed to be passed to log.SetOutput.
func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
a := StdlibAdapter{
Logger: logger,
timestampKey: "ts",
fileKey: "file",
messageKey: "msg",
}
for _, option := range options {
option(&a)
}
return a
}
func (a StdlibAdapter) Write(p []byte) (int, error) {
result := subexps(p)
keyvals := []interface{}{}
var timestamp string
if date, ok := result["date"]; ok && date != "" {
timestamp = date
}
if time, ok := result["time"]; ok && time != "" {
if timestamp != "" {
timestamp += " "
}
timestamp += time
}
if timestamp != "" {
keyvals = append(keyvals, a.timestampKey, timestamp)
}
if file, ok := result["file"]; ok && file != "" {
keyvals = append(keyvals, a.fileKey, file)
}
if msg, ok := result["msg"]; ok {
keyvals = append(keyvals, a.messageKey, msg)
}
if err := a.Logger.Log(keyvals...); err != nil {
return 0, err
}
return len(p), nil
}
const (
logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
logRegexpTime = `(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+)?)?[ ]?`
logRegexpFile = `(?P<file>.+?:[0-9]+)?`
logRegexpMsg = `(: )?(?P<msg>.*)`
)
var (
logRegexp = regexp.MustCompile(logRegexpDate + logRegexpTime + logRegexpFile + logRegexpMsg)
)
func subexps(line []byte) map[string]string {
m := logRegexp.FindSubmatch(line)
if len(m) < len(logRegexp.SubexpNames()) {
return map[string]string{}
}
result := map[string]string{}
for i, name := range logRegexp.SubexpNames() {
result[name] = string(m[i])
}
return result
}
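
A brief, hypothetical sketch of how this adapter is typically used, relying only on constructors defined in this vendored package (NewLogfmtLogger, NewSyncWriter, NewStdlibAdapter):

package main

import (
	stdlog "log"
	"os"

	kitlog "github.com/go-kit/kit/log"
)

func main() {
	// Route everything written through the stdlib logger into a structured go-kit logger.
	logger := kitlog.NewLogfmtLogger(kitlog.NewSyncWriter(os.Stdout))
	stdlog.SetOutput(kitlog.NewStdlibAdapter(logger))

	// The date, time, and message are split into "ts" and "msg" keys by the regexp above.
	stdlog.Println("hello from the stdlib logger")
}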

81
vendor/github.com/go-kit/kit/log/sync.go generated vendored Normal file

@ -0,0 +1,81 @@
package log
import (
"io"
"sync"
"sync/atomic"
)
// SwapLogger wraps another logger that may be safely replaced while other
// goroutines use the SwapLogger concurrently. The zero value for a SwapLogger
// will discard all log events without error.
//
// SwapLogger serves well as a package global logger that can be changed by
// importers.
type SwapLogger struct {
logger atomic.Value
}
type loggerStruct struct {
Logger
}
// Log implements the Logger interface by forwarding keyvals to the currently
// wrapped logger. It does not log anything if the wrapped logger is nil.
func (l *SwapLogger) Log(keyvals ...interface{}) error {
s, ok := l.logger.Load().(loggerStruct)
if !ok || s.Logger == nil {
return nil
}
return s.Log(keyvals...)
}
// Swap replaces the currently wrapped logger with logger. Swap may be called
// concurrently with calls to Log from other goroutines.
func (l *SwapLogger) Swap(logger Logger) {
l.logger.Store(loggerStruct{logger})
}
// SyncWriter synchronizes concurrent writes to an io.Writer.
type SyncWriter struct {
mu sync.Mutex
w io.Writer
}
// NewSyncWriter returns a new SyncWriter. The returned writer is safe for
// concurrent use by multiple goroutines.
func NewSyncWriter(w io.Writer) *SyncWriter {
return &SyncWriter{w: w}
}
// Write writes p to the underlying io.Writer. If another write is already in
// progress, the calling goroutine blocks until the SyncWriter is available.
func (w *SyncWriter) Write(p []byte) (n int, err error) {
w.mu.Lock()
n, err = w.w.Write(p)
w.mu.Unlock()
return n, err
}
// syncLogger provides concurrent safe logging for another Logger.
type syncLogger struct {
mu sync.Mutex
logger Logger
}
// NewSyncLogger returns a logger that synchronizes concurrent use of the
// wrapped logger. When multiple goroutines use the SyncLogger concurrently
// only one goroutine will be allowed to log to the wrapped logger at a time.
// The other goroutines will block until the logger is available.
func NewSyncLogger(logger Logger) Logger {
return &syncLogger{logger: logger}
}
// Log logs keyvals to the underlying Logger. If another log is already in
// progress, the calling goroutine blocks until the syncLogger is available.
func (l *syncLogger) Log(keyvals ...interface{}) error {
l.mu.Lock()
err := l.logger.Log(keyvals...)
l.mu.Unlock()
return err
}

62
vendor/github.com/go-kit/kit/log/value.go generated vendored Normal file

@ -0,0 +1,62 @@
package log
import (
"time"
"github.com/go-stack/stack"
)
// A Valuer generates a log value. When passed to Context.With in a value
// element (odd indexes), it represents a dynamic value which is re-evaluated
// with each log event.
type Valuer func() interface{}
// bindValues replaces all value elements (odd indexes) containing a Valuer
// with their generated value.
func bindValues(keyvals []interface{}) {
for i := 1; i < len(keyvals); i += 2 {
if v, ok := keyvals[i].(Valuer); ok {
keyvals[i] = v()
}
}
}
// containsValuer returns true if any of the value elements (odd indexes)
// contain a Valuer.
func containsValuer(keyvals []interface{}) bool {
for i := 1; i < len(keyvals); i += 2 {
if _, ok := keyvals[i].(Valuer); ok {
return true
}
}
return false
}
// Timestamp returns a Valuer that invokes the underlying function when bound,
// returning a time.Time. Users will probably want to use DefaultTimestamp or
// DefaultTimestampUTC.
func Timestamp(t func() time.Time) Valuer {
return func() interface{} { return t() }
}
var (
// DefaultTimestamp is a Valuer that returns the current wallclock time,
// respecting time zones, when bound.
DefaultTimestamp Valuer = func() interface{} { return time.Now().Format(time.RFC3339) }
// DefaultTimestampUTC is a Valuer that returns the current time in UTC
// when bound.
DefaultTimestampUTC Valuer = func() interface{} { return time.Now().UTC().Format(time.RFC3339) }
)
// Caller returns a Valuer that returns a file and line from a specified depth
// in the callstack. Users will probably want to use DefaultCaller.
func Caller(depth int) Valuer {
return func() interface{} { return stack.Caller(depth) }
}
var (
// DefaultCaller is a Valuer that returns the file and line where the Log
// method was invoked. It can only be used with log.With.
DefaultCaller = Caller(3)
)


@ -0,0 +1,306 @@
// Package dogstatsd provides a DogStatsD backend for package metrics. It's very
// similar to StatsD, but supports arbitrary tags per-metric, which map to Go
// kit's label values. So, while label values are no-ops in StatsD, they are
// supported here. For more details, see the documentation at
// http://docs.datadoghq.com/guides/dogstatsd/.
//
// This package batches observations and emits them on some schedule to the
// remote server. This is useful even if you connect to your DogStatsD server
// over UDP. Emitting one network packet per observation can quickly overwhelm
// even the fastest internal network.
package dogstatsd
import (
"fmt"
"io"
"strings"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn"
)
// Dogstatsd receives metrics observations and forwards them to a DogStatsD
// server. Create a Dogstatsd object, use it to create metrics, and pass those
// metrics as dependencies to the components that will use them.
//
// All metrics are buffered until WriteTo is called. Counters and gauges are
// aggregated into a single observation per timeseries per write. Timings and
// histograms are buffered but not aggregated.
//
// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a DogStatsD server, use the SendLoop helper method.
type Dogstatsd struct {
prefix string
rates *ratemap.RateMap
counters *lv.Space
gauges *lv.Space
timings *lv.Space
histograms *lv.Space
logger log.Logger
}
// New returns a Dogstatsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Dogstatsd {
return &Dogstatsd{
prefix: prefix,
rates: ratemap.New(),
counters: lv.NewSpace(),
gauges: lv.NewSpace(),
timings: lv.NewSpace(),
histograms: lv.NewSpace(),
logger: logger,
}
}
// NewCounter returns a counter, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
d.rates.Set(d.prefix+name, sampleRate)
return &Counter{
name: d.prefix + name,
obs: d.counters.Observe,
}
}
// NewGauge returns a gauge, sending observations to this Dogstatsd object.
func (d *Dogstatsd) NewGauge(name string) *Gauge {
return &Gauge{
name: d.prefix + name,
obs: d.gauges.Observe,
}
}
// NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
d.rates.Set(d.prefix+name, sampleRate)
return &Timing{
name: d.prefix + name,
obs: d.timings.Observe,
}
}
// NewHistogram returns a histogram whose observations are of an unspecified
// unit, and are forwarded to this Dogstatsd object.
func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
d.rates.Set(d.prefix+name, sampleRate)
return &Histogram{
name: d.prefix + name,
obs: d.histograms.Observe,
}
}
// WriteLoop is a helper method that invokes WriteTo to the passed writer every
// time the passed channel fires. This method blocks until the channel is
// closed, so clients probably want to run it in its own goroutine. For typical
// usage, create a time.Ticker and pass its C channel to this method.
func (d *Dogstatsd) WriteLoop(c <-chan time.Time, w io.Writer) {
for range c {
if _, err := d.WriteTo(w); err != nil {
d.logger.Log("during", "WriteTo", "err", err)
}
}
}
// SendLoop is a helper method that wraps WriteLoop, passing a managed
// connection to the network and address. Like WriteLoop, this method blocks
// until the channel is closed, so clients probably want to start it in its own
// goroutine. For typical usage, create a time.Ticker and pass its C channel to
// this method.
func (d *Dogstatsd) SendLoop(c <-chan time.Time, network, address string) {
d.WriteLoop(c, conn.NewDefaultManager(network, address, d.logger))
}
// WriteTo flushes the buffered content of the metrics to the writer, in
// DogStatsD format. WriteTo abides best-effort semantics, so observations are
// lost if there is a problem with the write. Clients should be sure to call
// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods.
func (d *Dogstatsd) WriteTo(w io.Writer) (count int64, err error) {
var n int
d.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s%s\n", name, sum(values), sampling(d.rates.Get(name)), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
d.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|g%s\n", name, last(values), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
d.timings.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
d.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool {
sampleRate := d.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|h%s%s\n", name, value, sampling(sampleRate), tagValues(lvs))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
return count, err
}
func sum(a []float64) float64 {
var v float64
for _, f := range a {
v += f
}
return v
}
func last(a []float64) float64 {
return a[len(a)-1]
}
func sampling(r float64) string {
var sv string
if r < 1.0 {
sv = fmt.Sprintf("|@%f", r)
}
return sv
}
func tagValues(labelValues []string) string {
if len(labelValues) == 0 {
return ""
}
if len(labelValues)%2 != 0 {
panic("tagValues received a labelValues with an odd number of strings")
}
pairs := make([]string, 0, len(labelValues)/2)
for i := 0; i < len(labelValues); i += 2 {
pairs = append(pairs, labelValues[i]+":"+labelValues[i+1])
}
return "|#" + strings.Join(pairs, ",")
}
type observeFunc func(name string, lvs lv.LabelValues, value float64)
// Counter is a DogStatsD counter. Observations are forwarded to a Dogstatsd
// object, and aggregated (summed) per timeseries.
type Counter struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Counter.
func (c *Counter) With(labelValues ...string) metrics.Counter {
return &Counter{
name: c.name,
lvs: c.lvs.With(labelValues...),
obs: c.obs,
}
}
// Add implements metrics.Counter.
func (c *Counter) Add(delta float64) {
c.obs(c.name, c.lvs, delta)
}
// Gauge is a DogStatsD gauge. Observations are forwarded to a Dogstatsd
// object, and aggregated (the last observation selected) per timeseries.
type Gauge struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Gauge.
func (g *Gauge) With(labelValues ...string) metrics.Gauge {
return &Gauge{
name: g.name,
lvs: g.lvs.With(labelValues...),
obs: g.obs,
}
}
// Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) {
g.obs(g.name, g.lvs, value)
}
// Timing is a DogStatsD timing, or metrics.Histogram. Observations are
// forwarded to a Dogstatsd object, and collected (but not aggregated) per
// timeseries.
type Timing struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Timing.
func (t *Timing) With(labelValues ...string) metrics.Histogram {
return &Timing{
name: t.name,
lvs: t.lvs.With(labelValues...),
obs: t.obs,
}
}
// Observe implements metrics.Histogram. Value is interpreted as milliseconds.
func (t *Timing) Observe(value float64) {
t.obs(t.name, t.lvs, value)
}
// Histogram is a DogStatsD histogram. Observations are forwarded to a
// Dogstatsd object, and collected (but not aggregated) per timeseries.
type Histogram struct {
name string
lvs lv.LabelValues
obs observeFunc
}
// With implements metrics.Histogram.
func (h *Histogram) With(labelValues ...string) metrics.Histogram {
return &Histogram{
name: h.name,
lvs: h.lvs.With(labelValues...),
obs: h.obs,
}
}
// Observe implements metrics.Histogram.
func (h *Histogram) Observe(value float64) {
h.obs(h.name, h.lvs, value)
}
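
For orientation, here is a minimal sketch of using this vendored dogstatsd package directly, with the same prefix and metric name the new Traefik middleware uses; the agent address and service label are assumptions.

package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/dogstatsd"
)

func main() {
	// Buffered client; observations are only flushed when SendLoop/WriteLoop fires.
	d := dogstatsd.New("traefik.", log.NewNopLogger())
	c := d.NewCounter("requests.total", 1.0).With("service", "backend1")
	c.Add(1)

	// Push the aggregated buffer to the (assumed) local agent every 10 seconds.
	go d.SendLoop(time.NewTicker(10*time.Second).C, "udp", "localhost:8125")
	time.Sleep(30 * time.Second)
}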


@ -0,0 +1,40 @@
// Package ratemap implements a goroutine-safe map of string to float64. It can
// be embedded in implementations whose metrics support fixed sample rates, so
// that an additional parameter doesn't have to be tracked through the e.g.
// lv.Space object.
package ratemap
import "sync"
// RateMap is a simple goroutine-safe map of string to float64.
type RateMap struct {
mtx sync.RWMutex
m map[string]float64
}
// New returns a new RateMap.
func New() *RateMap {
return &RateMap{
m: map[string]float64{},
}
}
// Set writes the given name/rate pair to the map.
// Set is safe for concurrent access by multiple goroutines.
func (m *RateMap) Set(name string, rate float64) {
m.mtx.Lock()
defer m.mtx.Unlock()
m.m[name] = rate
}
// Get retrieves the rate for the given name, or 1.0 if none is set.
// Get is safe for concurrent access by multiple goroutines.
func (m *RateMap) Get(name string) float64 {
m.mtx.RLock()
defer m.mtx.RUnlock()
f, ok := m.m[name]
if !ok {
f = 1.0
}
return f
}

79
vendor/github.com/go-kit/kit/metrics/multi/multi.go generated vendored Normal file

@ -0,0 +1,79 @@
// Package multi provides adapters that send observations to multiple metrics
// simultaneously. This is useful if your service needs to emit to multiple
// instrumentation systems at the same time, for example if your organization is
// transitioning from one system to another.
package multi
import "github.com/go-kit/kit/metrics"
// Counter collects multiple individual counters and treats them as a unit.
type Counter []metrics.Counter
// NewCounter returns a multi-counter, wrapping the passed counters.
func NewCounter(c ...metrics.Counter) Counter {
return Counter(c)
}
// Add implements counter.
func (c Counter) Add(delta float64) {
for _, counter := range c {
counter.Add(delta)
}
}
// With implements counter.
func (c Counter) With(labelValues ...string) metrics.Counter {
next := make(Counter, len(c))
for i := range c {
next[i] = c[i].With(labelValues...)
}
return next
}
// Gauge collects multiple individual gauges and treats them as a unit.
type Gauge []metrics.Gauge
// NewGauge returns a multi-gauge, wrapping the passed gauges.
func NewGauge(g ...metrics.Gauge) Gauge {
return Gauge(g)
}
// Set implements Gauge.
func (g Gauge) Set(value float64) {
for _, gauge := range g {
gauge.Set(value)
}
}
// With implements gauge.
func (g Gauge) With(labelValues ...string) metrics.Gauge {
next := make(Gauge, len(g))
for i := range g {
next[i] = g[i].With(labelValues...)
}
return next
}
// Histogram collects multiple individual histograms and treats them as a unit.
type Histogram []metrics.Histogram
// NewHistogram returns a multi-histogram, wrapping the passed histograms.
func NewHistogram(h ...metrics.Histogram) Histogram {
return Histogram(h)
}
// Observe implements Histogram.
func (h Histogram) Observe(value float64) {
for _, histogram := range h {
histogram.Observe(value)
}
}
// With implements histogram.
func (h Histogram) With(labelValues ...string) metrics.Histogram {
next := make(Histogram, len(h))
for i := range h {
next[i] = h[i].With(labelValues...)
}
return next
}

232
vendor/github.com/go-kit/kit/metrics/statsd/statsd.go generated vendored Normal file

@ -0,0 +1,232 @@
// Package statsd provides a StatsD backend for package metrics. StatsD has no
// concept of arbitrary key-value tagging, so label values are not supported,
// and With is a no-op on all metrics.
//
// This package batches observations and emits them on some schedule to the
// remote server. This is useful even if you connect to your StatsD server over
// UDP. Emitting one network packet per observation can quickly overwhelm even
// the fastest internal network.
package statsd
import (
"fmt"
"io"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/internal/lv"
"github.com/go-kit/kit/metrics/internal/ratemap"
"github.com/go-kit/kit/util/conn"
)
// Statsd receives metrics observations and forwards them to a StatsD server.
// Create a Statsd object, use it to create metrics, and pass those metrics as
// dependencies to the components that will use them.
//
// All metrics are buffered until WriteTo is called. Counters and gauges are
// aggregated into a single observation per timeseries per write. Timings are
// buffered but not aggregated.
//
// To regularly report metrics to an io.Writer, use the WriteLoop helper method.
// To send to a StatsD server, use the SendLoop helper method.
type Statsd struct {
prefix string
rates *ratemap.RateMap
// The observations are collected in an N-dimensional vector space, even
// though they only take advantage of a single dimension (name). This is an
// implementation detail born purely from convenience. It would be more
// accurate to collect them in a map[string][]float64, but we already have
// this nice data structure and helper methods.
counters *lv.Space
gauges *lv.Space
timings *lv.Space
logger log.Logger
}
// New returns a Statsd object that may be used to create metrics. Prefix is
// applied to all created metrics. Callers must ensure that regular calls to
// WriteTo are performed, either manually or with one of the helper methods.
func New(prefix string, logger log.Logger) *Statsd {
return &Statsd{
prefix: prefix,
rates: ratemap.New(),
counters: lv.NewSpace(),
gauges: lv.NewSpace(),
timings: lv.NewSpace(),
logger: logger,
}
}
// NewCounter returns a counter, sending observations to this Statsd object.
func (s *Statsd) NewCounter(name string, sampleRate float64) *Counter {
s.rates.Set(s.prefix+name, sampleRate)
return &Counter{
name: s.prefix + name,
obs: s.counters.Observe,
}
}
// NewGauge returns a gauge, sending observations to this Statsd object.
func (s *Statsd) NewGauge(name string) *Gauge {
return &Gauge{
name: s.prefix + name,
obs: s.gauges.Observe,
}
}
// NewTiming returns a histogram whose observations are interpreted as
// millisecond durations, and are forwarded to this Statsd object.
func (s *Statsd) NewTiming(name string, sampleRate float64) *Timing {
s.rates.Set(s.prefix+name, sampleRate)
return &Timing{
name: s.prefix + name,
obs: s.timings.Observe,
}
}
// WriteLoop is a helper method that invokes WriteTo to the passed writer every
// time the passed channel fires. This method blocks until the channel is
// closed, so clients probably want to run it in its own goroutine. For typical
// usage, create a time.Ticker and pass its C channel to this method.
func (s *Statsd) WriteLoop(c <-chan time.Time, w io.Writer) {
for range c {
if _, err := s.WriteTo(w); err != nil {
s.logger.Log("during", "WriteTo", "err", err)
}
}
}
// SendLoop is a helper method that wraps WriteLoop, passing a managed
// connection to the network and address. Like WriteLoop, this method blocks
// until the channel is closed, so clients probably want to start it in its own
// goroutine. For typical usage, create a time.Ticker and pass its C channel to
// this method.
func (s *Statsd) SendLoop(c <-chan time.Time, network, address string) {
s.WriteLoop(c, conn.NewDefaultManager(network, address, s.logger))
}
// WriteTo flushes the buffered content of the metrics to the writer, in
// StatsD format. WriteTo abides best-effort semantics, so observations are
// lost if there is a problem with the write. Clients should be sure to call
// WriteTo regularly, ideally through the WriteLoop or SendLoop helper methods.
func (s *Statsd) WriteTo(w io.Writer) (count int64, err error) {
var n int
s.counters.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|c%s\n", name, sum(values), sampling(s.rates.Get(name)))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
s.gauges.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
n, err = fmt.Fprintf(w, "%s:%f|g\n", name, last(values))
if err != nil {
return false
}
count += int64(n)
return true
})
if err != nil {
return count, err
}
s.timings.Reset().Walk(func(name string, _ lv.LabelValues, values []float64) bool {
sampleRate := s.rates.Get(name)
for _, value := range values {
n, err = fmt.Fprintf(w, "%s:%f|ms%s\n", name, value, sampling(sampleRate))
if err != nil {
return false
}
count += int64(n)
}
return true
})
if err != nil {
return count, err
}
return count, err
}
func sum(a []float64) float64 {
var v float64
for _, f := range a {
v += f
}
return v
}
func last(a []float64) float64 {
return a[len(a)-1]
}
func sampling(r float64) string {
var sv string
if r < 1.0 {
sv = fmt.Sprintf("|@%f", r)
}
return sv
}
type observeFunc func(name string, lvs lv.LabelValues, value float64)
// Counter is a StatsD counter. Observations are forwarded to a Statsd object,
// and aggregated (summed) per timeseries.
type Counter struct {
name string
obs observeFunc
}
// With is a no-op.
func (c *Counter) With(...string) metrics.Counter {
return c
}
// Add implements metrics.Counter.
func (c *Counter) Add(delta float64) {
c.obs(c.name, lv.LabelValues{}, delta)
}
// Gauge is a StatsD gauge. Observations are forwarded to a Statsd object, and
// aggregated (the last observation selected) per timeseries.
type Gauge struct {
name string
obs observeFunc
}
// With is a no-op.
func (g *Gauge) With(...string) metrics.Gauge {
return g
}
// Set implements metrics.Gauge.
func (g *Gauge) Set(value float64) {
g.obs(g.name, lv.LabelValues{}, value)
}
// Timing is a StatsD timing, or metrics.Histogram. Observations are
// forwarded to a Statsd object, and collected (but not aggregated) per
// timeseries.
type Timing struct {
name string
obs observeFunc
}
// With is a no-op.
func (t *Timing) With(...string) metrics.Histogram {
return t
}
// Observe implements metrics.Histogram. Value is interpreted as milliseconds.
func (t *Timing) Observe(value float64) {
t.obs(t.name, lv.LabelValues{}, value)
}
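A hedged usage sketch of the statsd API above, assuming the package-level New(prefix, logger) constructor whose tail is shown at the top of this file; metric names and the agent address are illustrative:

package main

import (
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/metrics/statsd"
)

func main() {
	// New's exact signature is assumed here; the prefix is prepended to
	// every metric name, as seen in NewCounter/NewGauge/NewTiming above.
	s := statsd.New("traefik.", log.NewNopLogger())

	requests := s.NewCounter("requests_total", 1.0)     // sampled at 100%
	latency := s.NewTiming("request_duration_ms", 1.0)  // milliseconds

	// Flush buffered observations to a local StatsD agent every 10s.
	// A real program keeps running; this sketch exits immediately.
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	go s.SendLoop(ticker.C, "udp", "localhost:8125")

	requests.Add(1)
	latency.Observe(42)
}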

2
vendor/github.com/go-kit/kit/util/conn/doc.go generated vendored Normal file
View file

@@ -0,0 +1,2 @@
// Package conn provides utilities related to connections.
package conn

145
vendor/github.com/go-kit/kit/util/conn/manager.go generated vendored Normal file
View file

@@ -0,0 +1,145 @@
package conn
import (
"errors"
"net"
"time"
"github.com/go-kit/kit/log"
)
// Dialer imitates net.Dial. Dialer is assumed to yield connections that are
// safe for use by multiple concurrent goroutines.
type Dialer func(network, address string) (net.Conn, error)
// AfterFunc imitates time.After.
type AfterFunc func(time.Duration) <-chan time.Time
// Manager manages a net.Conn.
//
// Clients provide a way to create the connection with a Dialer, network, and
// address. Clients should Take the connection when they want to use it, and Put
// back whatever error they receive from its use. When a non-nil error is Put,
// the connection is invalidated, and a new connection is established.
// Connection failures are retried after an exponential backoff.
type Manager struct {
dialer Dialer
network string
address string
after AfterFunc
logger log.Logger
takec chan net.Conn
putc chan error
}
// NewManager returns a connection manager using the passed Dialer, network, and
// address. The AfterFunc is used to control exponential backoff and retries.
// The logger is used to log errors; pass log.NewNopLogger() if you don't care to
// receive them. For normal use, prefer NewDefaultManager.
func NewManager(d Dialer, network, address string, after AfterFunc, logger log.Logger) *Manager {
m := &Manager{
dialer: d,
network: network,
address: address,
after: after,
logger: logger,
takec: make(chan net.Conn),
putc: make(chan error),
}
go m.loop()
return m
}
// NewDefaultManager is a helper constructor, suitable for most normal use in
// real (non-test) code. It uses the real net.Dial and time.After functions.
func NewDefaultManager(network, address string, logger log.Logger) *Manager {
return NewManager(net.Dial, network, address, time.After, logger)
}
// Take yields the current connection. It may be nil.
func (m *Manager) Take() net.Conn {
return <-m.takec
}
// Put accepts an error that came from a previously yielded connection. If the
// error is non-nil, the manager will invalidate the current connection and try
// to reconnect, with exponential backoff. Putting a nil error is a no-op.
func (m *Manager) Put(err error) {
m.putc <- err
}
// Write writes the passed data to the connection in a single Take/Put cycle.
func (m *Manager) Write(b []byte) (int, error) {
conn := m.Take()
if conn == nil {
return 0, ErrConnectionUnavailable
}
n, err := conn.Write(b)
defer m.Put(err)
return n, err
}
func (m *Manager) loop() {
var (
conn = dial(m.dialer, m.network, m.address, m.logger) // may block slightly
connc = make(chan net.Conn, 1)
reconnectc <-chan time.Time // initially nil
backoff = time.Second
)
// If the initial dial fails, we need to trigger a reconnect via the loop
// body, below. If we did this in a goroutine, we would race on the conn
// variable. So we use a buffered chan instead.
connc <- conn
for {
select {
case <-reconnectc:
reconnectc = nil // one-shot
go func() { connc <- dial(m.dialer, m.network, m.address, m.logger) }()
case conn = <-connc:
if conn == nil {
// didn't work
backoff = exponential(backoff) // wait longer
reconnectc = m.after(backoff) // try again
} else {
// worked!
backoff = time.Second // reset wait time
reconnectc = nil // no retry necessary
}
case m.takec <- conn:
case err := <-m.putc:
if err != nil && conn != nil {
m.logger.Log("err", err)
conn = nil // connection is bad
reconnectc = m.after(time.Nanosecond) // trigger immediately
}
}
}
}
func dial(d Dialer, network, address string, logger log.Logger) net.Conn {
conn, err := d(network, address)
if err != nil {
logger.Log("err", err)
conn = nil // just to be sure
}
return conn
}
func exponential(d time.Duration) time.Duration {
d *= 2
if d > time.Minute {
d = time.Minute
}
return d
}
// ErrConnectionUnavailable is returned by the Manager's Write method when the
// manager cannot yield a good connection.
var ErrConnectionUnavailable = errors.New("connection unavailable")
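A brief sketch of the Take/Put cycle the Manager comments describe, using NewDefaultManager; the address and payload are illustrative:

package main

import (
	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/util/conn"
)

func main() {
	logger := log.NewNopLogger()
	m := conn.NewDefaultManager("udp", "localhost:8125", logger)

	// Write performs a single Take/Put cycle internally; a non-nil error
	// invalidates the connection and triggers reconnection with backoff.
	if _, err := m.Write([]byte("requests_total:1|c\n")); err != nil {
		logger.Log("err", err) // e.g. conn.ErrConnectionUnavailable
	}

	// The same cycle, spelled out explicitly.
	if c := m.Take(); c != nil {
		_, err := c.Write([]byte("requests_total:1|c\n"))
		m.Put(err) // Put(nil) is a no-op; Put(err) forces a redial
	}
}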

22
vendor/github.com/go-logfmt/logfmt/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 go-logfmt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

237
vendor/github.com/go-logfmt/logfmt/decode.go generated vendored Normal file
View file

@@ -0,0 +1,237 @@
package logfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"unicode/utf8"
)
// A Decoder reads and decodes logfmt records from an input stream.
type Decoder struct {
pos int
key []byte
value []byte
lineNum int
s *bufio.Scanner
err error
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read data from r beyond
// the logfmt records requested.
func NewDecoder(r io.Reader) *Decoder {
dec := &Decoder{
s: bufio.NewScanner(r),
}
return dec
}
// ScanRecord advances the Decoder to the next record, which can then be
// parsed with the ScanKeyval method. It returns false when decoding stops,
// either by reaching the end of the input or an error. After ScanRecord
// returns false, the Err method will return any error that occurred during
// decoding, except that if it was io.EOF, Err will return nil.
func (dec *Decoder) ScanRecord() bool {
if dec.err != nil {
return false
}
if !dec.s.Scan() {
dec.err = dec.s.Err()
return false
}
dec.lineNum++
dec.pos = 0
return true
}
// ScanKeyval advances the Decoder to the next key/value pair of the current
// record, which can then be retrieved with the Key and Value methods. It
// returns false when decoding stops, either by reaching the end of the
// current record or an error.
func (dec *Decoder) ScanKeyval() bool {
dec.key, dec.value = nil, nil
if dec.err != nil {
return false
}
line := dec.s.Bytes()
// garbage
for p, c := range line[dec.pos:] {
if c > ' ' {
dec.pos += p
goto key
}
}
dec.pos = len(line)
return false
key:
const invalidKeyError = "invalid key"
start, multibyte := dec.pos, false
for p, c := range line[dec.pos:] {
switch {
case c == '=':
dec.pos += p
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
dec.syntaxError(invalidKeyError)
return false
}
}
if dec.key == nil {
dec.unexpectedByte(c)
return false
}
goto equal
case c == '"':
dec.pos += p
dec.unexpectedByte(c)
return false
case c <= ' ':
dec.pos += p
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
dec.syntaxError(invalidKeyError)
return false
}
}
return true
case c >= utf8.RuneSelf:
multibyte = true
}
}
dec.pos = len(line)
if dec.pos > start {
dec.key = line[start:dec.pos]
if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 {
dec.syntaxError(invalidKeyError)
return false
}
}
return true
equal:
dec.pos++
if dec.pos >= len(line) {
return true
}
switch c := line[dec.pos]; {
case c <= ' ':
return true
case c == '"':
goto qvalue
}
// value
start = dec.pos
for p, c := range line[dec.pos:] {
switch {
case c == '=' || c == '"':
dec.pos += p
dec.unexpectedByte(c)
return false
case c <= ' ':
dec.pos += p
if dec.pos > start {
dec.value = line[start:dec.pos]
}
return true
}
}
dec.pos = len(line)
if dec.pos > start {
dec.value = line[start:dec.pos]
}
return true
qvalue:
const (
untermQuote = "unterminated quoted value"
invalidQuote = "invalid quoted value"
)
hasEsc, esc := false, false
start = dec.pos
for p, c := range line[dec.pos+1:] {
switch {
case esc:
esc = false
case c == '\\':
hasEsc, esc = true, true
case c == '"':
dec.pos += p + 2
if hasEsc {
v, ok := unquoteBytes(line[start:dec.pos])
if !ok {
dec.syntaxError(invalidQuote)
return false
}
dec.value = v
} else {
start++
end := dec.pos - 1
if end > start {
dec.value = line[start:end]
}
}
return true
}
}
dec.pos = len(line)
dec.syntaxError(untermQuote)
return false
}
// Key returns the most recent key found by a call to ScanKeyval. The returned
// slice may point to internal buffers and is only valid until the next call
// to ScanRecord. It does no allocation.
func (dec *Decoder) Key() []byte {
return dec.key
}
// Value returns the most recent value found by a call to ScanKeyval. The
// returned slice may point to internal buffers and is only valid until the
// next call to ScanRecord. It does no allocation when the value has no
// escape sequences.
func (dec *Decoder) Value() []byte {
return dec.value
}
// Err returns the first non-EOF error that was encountered by the Scanner.
func (dec *Decoder) Err() error {
return dec.err
}
func (dec *Decoder) syntaxError(msg string) {
dec.err = &SyntaxError{
Msg: msg,
Line: dec.lineNum,
Pos: dec.pos + 1,
}
}
func (dec *Decoder) unexpectedByte(c byte) {
dec.err = &SyntaxError{
Msg: fmt.Sprintf("unexpected %q", c),
Line: dec.lineNum,
Pos: dec.pos + 1,
}
}
// A SyntaxError represents a syntax error in the logfmt input stream.
type SyntaxError struct {
Msg string
Line int
Pos int
}
func (e *SyntaxError) Error() string {
return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg)
}
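A small sketch of the ScanRecord/ScanKeyval loop described above; the input line is illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/go-logfmt/logfmt"
)

func main() {
	in := "level=info msg=\"metrics enabled\"\nlevel=debug elapsed=15ms\n"
	d := logfmt.NewDecoder(strings.NewReader(in))
	for d.ScanRecord() {
		for d.ScanKeyval() {
			// Key and Value point into internal buffers; copy them if
			// they must outlive the next ScanRecord call.
			fmt.Printf("%s=%s ", d.Key(), d.Value())
		}
		fmt.Println()
	}
	if err := d.Err(); err != nil {
		panic(err)
	}
}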

6
vendor/github.com/go-logfmt/logfmt/doc.go generated vendored Normal file
View file

@@ -0,0 +1,6 @@
// Package logfmt implements utilities to marshal and unmarshal data in the
// logfmt format. The logfmt format records key/value pairs in a way that
// balances readability for humans and simplicity of computer parsing. It is
// most commonly used as a more human friendly alternative to JSON for
// structured logging.
package logfmt

321
vendor/github.com/go-logfmt/logfmt/encode.go generated vendored Normal file
View file

@@ -0,0 +1,321 @@
package logfmt
import (
"bytes"
"encoding"
"errors"
"fmt"
"io"
"reflect"
"strings"
"unicode/utf8"
)
// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence
// of alternating keys and values.
func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) {
buf := &bytes.Buffer{}
if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// An Encoder writes logfmt data to an output stream.
type Encoder struct {
w io.Writer
scratch bytes.Buffer
needSep bool
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: w,
}
}
var (
space = []byte(" ")
equals = []byte("=")
newline = []byte("\n")
null = []byte("null")
)
// EncodeKeyval writes the logfmt encoding of key and value to the stream. A
// single space is written before the second and subsequent keys in a record.
// Nothing is written if a non-nil error is returned.
func (enc *Encoder) EncodeKeyval(key, value interface{}) error {
enc.scratch.Reset()
if enc.needSep {
if _, err := enc.scratch.Write(space); err != nil {
return err
}
}
if err := writeKey(&enc.scratch, key); err != nil {
return err
}
if _, err := enc.scratch.Write(equals); err != nil {
return err
}
if err := writeValue(&enc.scratch, value); err != nil {
return err
}
_, err := enc.w.Write(enc.scratch.Bytes())
enc.needSep = true
return err
}
// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals
// is a variadic sequence of alternating keys and values. Keys of unsupported
// type are skipped along with their corresponding value. Values of
// unsupported type or that cause a MarshalerError are replaced by their error
// but do not cause EncodeKeyvals to return an error. If a non-nil error is
// returned, some key/value pairs may not have been written.
func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error {
if len(keyvals) == 0 {
return nil
}
if len(keyvals)%2 == 1 {
keyvals = append(keyvals, nil)
}
for i := 0; i < len(keyvals); i += 2 {
k, v := keyvals[i], keyvals[i+1]
err := enc.EncodeKeyval(k, v)
if err == ErrUnsupportedKeyType {
continue
}
if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType {
v = err
err = enc.EncodeKeyval(k, v)
}
if err != nil {
return err
}
}
return nil
}
// MarshalerError represents an error encountered while marshaling a value.
type MarshalerError struct {
Type reflect.Type
Err error
}
func (e *MarshalerError) Error() string {
return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error()
}
// ErrNilKey is returned by Marshal functions and Encoder methods if a key is
// a nil interface or pointer value.
var ErrNilKey = errors.New("nil key")
// ErrInvalidKey is returned by Marshal functions and Encoder methods if a key
// contains an invalid character.
var ErrInvalidKey = errors.New("invalid key")
// ErrUnsupportedKeyType is returned by Encoder methods if a key has an
// unsupported type.
var ErrUnsupportedKeyType = errors.New("unsupported key type")
// ErrUnsupportedValueType is returned by Encoder methods if a value has an
// unsupported type.
var ErrUnsupportedValueType = errors.New("unsupported value type")
func writeKey(w io.Writer, key interface{}) error {
if key == nil {
return ErrNilKey
}
switch k := key.(type) {
case string:
return writeStringKey(w, k)
case []byte:
if k == nil {
return ErrNilKey
}
return writeBytesKey(w, k)
case encoding.TextMarshaler:
kb, err := safeMarshal(k)
if err != nil {
return err
}
if kb == nil {
return ErrNilKey
}
return writeBytesKey(w, kb)
case fmt.Stringer:
ks, ok := safeString(k)
if !ok {
return ErrNilKey
}
return writeStringKey(w, ks)
default:
rkey := reflect.ValueOf(key)
switch rkey.Kind() {
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
return ErrUnsupportedKeyType
case reflect.Ptr:
if rkey.IsNil() {
return ErrNilKey
}
return writeKey(w, rkey.Elem().Interface())
}
return writeStringKey(w, fmt.Sprint(k))
}
}
func invalidKeyRune(r rune) bool {
return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
}
func invalidKeyString(key string) bool {
return len(key) == 0 || strings.IndexFunc(key, invalidKeyRune) != -1
}
func invalidKey(key []byte) bool {
return len(key) == 0 || bytes.IndexFunc(key, invalidKeyRune) != -1
}
func writeStringKey(w io.Writer, key string) error {
if invalidKeyString(key) {
return ErrInvalidKey
}
_, err := io.WriteString(w, key)
return err
}
func writeBytesKey(w io.Writer, key []byte) error {
if invalidKey(key) {
return ErrInvalidKey
}
_, err := w.Write(key)
return err
}
func writeValue(w io.Writer, value interface{}) error {
switch v := value.(type) {
case nil:
return writeBytesValue(w, null)
case string:
return writeStringValue(w, v, true)
case []byte:
return writeBytesValue(w, v)
case encoding.TextMarshaler:
vb, err := safeMarshal(v)
if err != nil {
return err
}
if vb == nil {
vb = null
}
return writeBytesValue(w, vb)
case error:
se, ok := safeError(v)
return writeStringValue(w, se, ok)
case fmt.Stringer:
ss, ok := safeString(v)
return writeStringValue(w, ss, ok)
default:
rvalue := reflect.ValueOf(value)
switch rvalue.Kind() {
case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct:
return ErrUnsupportedValueType
case reflect.Ptr:
if rvalue.IsNil() {
return writeBytesValue(w, null)
}
return writeValue(w, rvalue.Elem().Interface())
}
return writeStringValue(w, fmt.Sprint(v), true)
}
}
func needsQuotedValueRune(r rune) bool {
return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError
}
func writeStringValue(w io.Writer, value string, ok bool) error {
var err error
if ok && value == "null" {
_, err = io.WriteString(w, `"null"`)
} else if strings.IndexFunc(value, needsQuotedValueRune) != -1 {
_, err = writeQuotedString(w, value)
} else {
_, err = io.WriteString(w, value)
}
return err
}
func writeBytesValue(w io.Writer, value []byte) error {
var err error
if bytes.IndexFunc(value, needsQuotedValueRune) != -1 {
_, err = writeQuotedBytes(w, value)
} else {
_, err = w.Write(value)
}
return err
}
// EndRecord writes a newline character to the stream and resets the encoder
// to the beginning of a new record.
func (enc *Encoder) EndRecord() error {
_, err := enc.w.Write(newline)
if err == nil {
enc.needSep = false
}
return err
}
// Reset resets the encoder to the beginning of a new record.
func (enc *Encoder) Reset() {
enc.needSep = false
}
func safeError(err error) (s string, ok bool) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
s, ok = "null", false
} else {
panic(panicVal)
}
}
}()
s, ok = err.Error(), true
return
}
func safeString(str fmt.Stringer) (s string, ok bool) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() {
s, ok = "null", false
} else {
panic(panicVal)
}
}
}()
s, ok = str.String(), true
return
}
func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) {
defer func() {
if panicVal := recover(); panicVal != nil {
if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() {
b, err = nil, nil
} else {
panic(panicVal)
}
}
}()
b, err = tm.MarshalText()
if err != nil {
return nil, &MarshalerError{
Type: reflect.TypeOf(tm),
Err: err,
}
}
return
}
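And the encoding side, a sketch using MarshalKeyvals for one-shot records and the streaming Encoder; keys and values are illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// One-shot: a variadic sequence of alternating keys and values.
	b, err := logfmt.MarshalKeyvals("msg", "metrics enabled", "backend", "statsd")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // msg="metrics enabled" backend=statsd

	// Streaming: EncodeKeyval per pair, EndRecord per line.
	enc := logfmt.NewEncoder(os.Stdout)
	_ = enc.EncodeKeyval("addr", "localhost:8125")
	_ = enc.EncodeKeyval("interval", "10s")
	_ = enc.EndRecord()
}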

126
vendor/github.com/go-logfmt/logfmt/fuzz.go generated vendored Normal file
View file

@@ -0,0 +1,126 @@
// +build gofuzz
package logfmt
import (
"bufio"
"bytes"
"fmt"
"io"
"reflect"
kr "github.com/kr/logfmt"
)
// Fuzz checks reserialized data matches
func Fuzz(data []byte) int {
parsed, err := parse(data)
if err != nil {
return 0
}
var w1 bytes.Buffer
if err = write(parsed, &w1); err != nil {
panic(err)
}
parsed, err = parse(w1.Bytes())
if err != nil {
panic(err)
}
var w2 bytes.Buffer
if err = write(parsed, &w2); err != nil {
panic(err)
}
if !bytes.Equal(w1.Bytes(), w2.Bytes()) {
panic(fmt.Sprintf("reserialized data does not match:\n%q\n%q\n", w1.Bytes(), w2.Bytes()))
}
return 1
}
// FuzzVsKR checks go-logfmt/logfmt against kr/logfmt
func FuzzVsKR(data []byte) int {
parsed, err := parse(data)
parsedKR, errKR := parseKR(data)
// github.com/go-logfmt/logfmt is a stricter parser. It returns errors for
// more inputs than github.com/kr/logfmt. Ignore any inputs that have a
// strict error.
if err != nil {
return 0
}
// Fail if the more forgiving parser finds an error not found by the
// stricter parser.
if errKR != nil {
panic(fmt.Sprintf("unmatched error: %v", errKR))
}
if !reflect.DeepEqual(parsed, parsedKR) {
panic(fmt.Sprintf("parsers disagree:\n%+v\n%+v\n", parsed, parsedKR))
}
return 1
}
type kv struct {
k, v []byte
}
func parse(data []byte) ([][]kv, error) {
var got [][]kv
dec := NewDecoder(bytes.NewReader(data))
for dec.ScanRecord() {
var kvs []kv
for dec.ScanKeyval() {
kvs = append(kvs, kv{dec.Key(), dec.Value()})
}
got = append(got, kvs)
}
return got, dec.Err()
}
func parseKR(data []byte) ([][]kv, error) {
var (
s = bufio.NewScanner(bytes.NewReader(data))
err error
h saveHandler
got [][]kv
)
for err == nil && s.Scan() {
h.kvs = nil
err = kr.Unmarshal(s.Bytes(), &h)
got = append(got, h.kvs)
}
if err == nil {
err = s.Err()
}
return got, err
}
type saveHandler struct {
kvs []kv
}
func (h *saveHandler) HandleLogfmt(key, val []byte) error {
if len(key) == 0 {
key = nil
}
if len(val) == 0 {
val = nil
}
h.kvs = append(h.kvs, kv{key, val})
return nil
}
func write(recs [][]kv, w io.Writer) error {
enc := NewEncoder(w)
for _, rec := range recs {
for _, f := range rec {
if err := enc.EncodeKeyval(f.k, f.v); err != nil {
return err
}
}
if err := enc.EndRecord(); err != nil {
return err
}
}
return nil
}

277
vendor/github.com/go-logfmt/logfmt/jsonstring.go generated vendored Normal file
View file

@@ -0,0 +1,277 @@
package logfmt
import (
"bytes"
"io"
"strconv"
"sync"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Taken from Go's encoding/json and modified for use here.
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
var hex = "0123456789abcdef"
var bufferPool = sync.Pool{
New: func() interface{} {
return &bytes.Buffer{}
},
}
func getBuffer() *bytes.Buffer {
return bufferPool.Get().(*bytes.Buffer)
}
func poolBuffer(buf *bytes.Buffer) {
buf.Reset()
bufferPool.Put(buf)
}
// NOTE: keep in sync with writeQuotedBytes below.
func writeQuotedString(w io.Writer, s string) (int, error) {
buf := getBuffer()
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' {
i++
continue
}
if start < i {
buf.WriteString(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
case '\t':
buf.WriteByte('\\')
buf.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n, \r, and \t.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.WriteString(s[start:])
}
buf.WriteByte('"')
n, err := w.Write(buf.Bytes())
poolBuffer(buf)
return n, err
}
// NOTE: keep in sync with writeQuotedString above.
func writeQuotedBytes(w io.Writer, s []byte) (int, error) {
buf := getBuffer()
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' {
i++
continue
}
if start < i {
buf.Write(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
case '\t':
buf.WriteByte('\\')
buf.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n, \r, and \t.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.Write(s[start:])
}
buf.WriteByte('"')
n, err := w.Write(buf.Bytes())
poolBuffer(buf)
return n, err
}
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
return -1
}
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
if err != nil {
return -1
}
return rune(r)
}
func unquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError {
break
}
r += size
}
if r == len(s) {
return s, true
}
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
for r < len(s) {
// Out of room? Can only happen if s is full of
// malformed UTF-8 and we're replacing each
// byte with RuneError.
if w >= len(b)-2*utf8.UTFMax {
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
copy(nb, b[0:w])
b = nb
}
switch c := s[r]; {
case c == '\\':
r++
if r >= len(s) {
return
}
switch s[r] {
default:
return
case '"', '\\', '/', '\'':
b[w] = s[r]
r++
w++
case 'b':
b[w] = '\b'
r++
w++
case 'f':
b[w] = '\f'
r++
w++
case 'n':
b[w] = '\n'
r++
w++
case 'r':
b[w] = '\r'
r++
w++
case 't':
b[w] = '\t'
r++
w++
case 'u':
r--
rr := getu4(s[r:])
if rr < 0 {
return
}
r += 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(s[r:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
// A valid pair; consume.
r += 6
w += utf8.EncodeRune(b[w:], dec)
break
}
// Invalid surrogate; fall back to replacement rune.
rr = unicode.ReplacementChar
}
w += utf8.EncodeRune(b[w:], rr)
}
// Quote, control characters are invalid.
case c == '"', c < ' ':
return
// ASCII
case c < utf8.RuneSelf:
b[w] = c
r++
w++
// Coerce to well-formed UTF-8.
default:
rr, size := utf8.DecodeRune(s[r:])
r += size
w += utf8.EncodeRune(b[w:], rr)
}
}
return b[0:w], true
}
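The quoting rules implemented by writeQuotedString are easiest to observe through the exported encoder; a hedged illustration with made-up values:

package main

import (
	"fmt"

	"github.com/go-logfmt/logfmt"
)

func main() {
	// Values containing spaces, '=', '"', control bytes, or invalid UTF-8
	// are quoted and escaped; everything else is written bare.
	b, _ := logfmt.MarshalKeyvals(
		"plain", "bare",
		"spaced", "hello world",
		"quoted", `say "hi"`,
		"newline", "a\nb",
	)
	fmt.Println(string(b))
	// plain=bare spaced="hello world" quoted="say \"hi\"" newline="a\nb"
}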

21
vendor/github.com/go-stack/stack/LICENSE.md generated vendored Normal file
View file

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Chris Hines
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

322
vendor/github.com/go-stack/stack/stack.go generated vendored Normal file
View file

@@ -0,0 +1,322 @@
// Package stack implements utilities to capture, manipulate, and format call
// stacks. It provides a simpler API than package runtime.
//
// The implementation takes care of the minutia and special cases of
// interpreting the program counter (pc) values returned by runtime.Callers.
//
// Package stack's types implement fmt.Formatter, which provides a simple and
// flexible way to declaratively configure formatting when used with logging
// or error tracking packages.
package stack
import (
"bytes"
"errors"
"fmt"
"io"
"runtime"
"strconv"
"strings"
)
// Call records a single function invocation from a goroutine stack.
type Call struct {
fn *runtime.Func
pc uintptr
}
// Caller returns a Call from the stack of the current goroutine. The argument
// skip is the number of stack frames to ascend, with 0 identifying the
// calling function.
func Caller(skip int) Call {
var pcs [2]uintptr
n := runtime.Callers(skip+1, pcs[:])
var c Call
if n < 2 {
return c
}
c.pc = pcs[1]
if runtime.FuncForPC(pcs[0]).Name() != "runtime.sigpanic" {
c.pc--
}
c.fn = runtime.FuncForPC(c.pc)
return c
}
// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
func (c Call) String() string {
return fmt.Sprint(c)
}
// MarshalText implements encoding.TextMarshaler. It formats the Call the same
// as fmt.Sprintf("%v", c).
func (c Call) MarshalText() ([]byte, error) {
if c.fn == nil {
return nil, ErrNoFunc
}
buf := bytes.Buffer{}
fmt.Fprint(&buf, c)
return buf.Bytes(), nil
}
// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
// cause is a Call with the zero value.
var ErrNoFunc = errors.New("no call stack information")
// Format implements fmt.Formatter with support for the following verbs.
//
// %s source file
// %d line number
// %n function name
// %v equivalent to %s:%d
//
// It accepts the '+' and '#' flags for most of the verbs as follows.
//
// %+s path of source file relative to the compile time GOPATH
// %#s full path of source file
// %+n import path qualified function name
// %+v equivalent to %+s:%d
// %#v equivalent to %#s:%d
func (c Call) Format(s fmt.State, verb rune) {
if c.fn == nil {
fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
return
}
switch verb {
case 's', 'v':
file, line := c.fn.FileLine(c.pc)
switch {
case s.Flag('#'):
// done
case s.Flag('+'):
file = file[pkgIndex(file, c.fn.Name()):]
default:
const sep = "/"
if i := strings.LastIndex(file, sep); i != -1 {
file = file[i+len(sep):]
}
}
io.WriteString(s, file)
if verb == 'v' {
buf := [7]byte{':'}
s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
}
case 'd':
_, line := c.fn.FileLine(c.pc)
buf := [6]byte{}
s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
case 'n':
name := c.fn.Name()
if !s.Flag('+') {
const pathSep = "/"
if i := strings.LastIndex(name, pathSep); i != -1 {
name = name[i+len(pathSep):]
}
const pkgSep = "."
if i := strings.Index(name, pkgSep); i != -1 {
name = name[i+len(pkgSep):]
}
}
io.WriteString(s, name)
}
}
// PC returns the program counter for this call frame; multiple frames may
// have the same PC value.
func (c Call) PC() uintptr {
return c.pc
}
// name returns the import path qualified name of the function containing the
// call.
func (c Call) name() string {
if c.fn == nil {
return "???"
}
return c.fn.Name()
}
func (c Call) file() string {
if c.fn == nil {
return "???"
}
file, _ := c.fn.FileLine(c.pc)
return file
}
func (c Call) line() int {
if c.fn == nil {
return 0
}
_, line := c.fn.FileLine(c.pc)
return line
}
// CallStack records a sequence of function invocations from a goroutine
// stack.
type CallStack []Call
// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
func (cs CallStack) String() string {
return fmt.Sprint(cs)
}
var (
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
spaceBytes = []byte(" ")
)
// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
// same as fmt.Sprintf("%v", cs).
func (cs CallStack) MarshalText() ([]byte, error) {
buf := bytes.Buffer{}
buf.Write(openBracketBytes)
for i, pc := range cs {
if pc.fn == nil {
return nil, ErrNoFunc
}
if i > 0 {
buf.Write(spaceBytes)
}
fmt.Fprint(&buf, pc)
}
buf.Write(closeBracketBytes)
return buf.Bytes(), nil
}
// Format implements fmt.Formatter by printing the CallStack as square brackets
// ([, ]) surrounding a space separated list of Calls each formatted with the
// supplied verb and options.
func (cs CallStack) Format(s fmt.State, verb rune) {
s.Write(openBracketBytes)
for i, pc := range cs {
if i > 0 {
s.Write(spaceBytes)
}
pc.Format(s, verb)
}
s.Write(closeBracketBytes)
}
// Trace returns a CallStack for the current goroutine with element 0
// identifying the calling function.
func Trace() CallStack {
var pcs [512]uintptr
n := runtime.Callers(2, pcs[:])
cs := make([]Call, n)
for i, pc := range pcs[:n] {
pcFix := pc
if i > 0 && cs[i-1].fn.Name() != "runtime.sigpanic" {
pcFix--
}
cs[i] = Call{
fn: runtime.FuncForPC(pcFix),
pc: pcFix,
}
}
return cs
}
// TrimBelow returns a slice of the CallStack with all entries below c
// removed.
func (cs CallStack) TrimBelow(c Call) CallStack {
for len(cs) > 0 && cs[0].pc != c.pc {
cs = cs[1:]
}
return cs
}
// TrimAbove returns a slice of the CallStack with all entries above c
// removed.
func (cs CallStack) TrimAbove(c Call) CallStack {
for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
cs = cs[:len(cs)-1]
}
return cs
}
// pkgIndex returns the index that results in file[index:] being the path of
// file relative to the compile time GOPATH, and file[:index] being the
// $GOPATH/src/ portion of file. funcName must be the name of a function in
// file as returned by runtime.Func.Name.
func pkgIndex(file, funcName string) int {
// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
// at runtime, but we can infer the number of path segments in the GOPATH.
// We note that runtime.Func.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// file[:idx] == /home/user/src/
// file[idx:] == pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired result for file[idx:]. We count separators from the
// end of the file path until we find two more than in the function name
// and then move one character forward to preserve the initial path
// segment without a leading separator.
const sep = "/"
i := len(file)
for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
return i + len(sep)
}
var runtimePath string
func init() {
var pcs [1]uintptr
runtime.Callers(0, pcs[:])
fn := runtime.FuncForPC(pcs[0])
file, _ := fn.FileLine(pcs[0])
idx := pkgIndex(file, fn.Name())
runtimePath = file[:idx]
if runtime.GOOS == "windows" {
runtimePath = strings.ToLower(runtimePath)
}
}
func inGoroot(c Call) bool {
file := c.file()
if len(file) == 0 || file[0] == '?' {
return true
}
if runtime.GOOS == "windows" {
file = strings.ToLower(file)
}
return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
}
// TrimRuntime returns a slice of the CallStack with the topmost entries from
// the go runtime removed. It considers any calls originating from unknown
// files, files under GOROOT, or _testmain.go as part of the runtime.
func (cs CallStack) TrimRuntime() CallStack {
for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
cs = cs[:len(cs)-1]
}
return cs
}
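A short sketch of the Caller/Trace API and the Format verbs documented above; the printed paths in the comments are only illustrative:

package main

import (
	"fmt"

	"github.com/go-stack/stack"
)

func main() {
	c := stack.Caller(0)
	fmt.Printf("%v\n", c)  // e.g. main.go:12
	fmt.Printf("%+v\n", c) // e.g. github.com/you/app/main.go:12
	fmt.Printf("%n\n", c)  // e.g. main

	// Full call stack of this goroutine with runtime frames trimmed.
	cs := stack.Trace().TrimRuntime()
	fmt.Printf("%v\n", cs) // e.g. [main.go:17]
}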

184
vendor/github.com/kr/logfmt/decode.go generated vendored Normal file
View file

@@ -0,0 +1,184 @@
// Package logfmt implements the decoding of logfmt key-value pairs.
//
// Example logfmt message:
//
// foo=bar a=14 baz="hello kitty" cool%story=bro f %^asdf
//
// Example result in JSON:
//
// { "foo": "bar", "a": 14, "baz": "hello kitty", "cool%story": "bro", "f": true, "%^asdf": true }
//
// EBNFish:
//
// ident_byte = any byte greater than ' ', excluding '=' and '"'
// string_byte = any byte excluding '"' and '\'
// garbage = !ident_byte
// ident = ident_byte, { ident byte }
// key = ident
// value = ident | '"', { string_byte | '\', '"' }, '"'
// pair = key, '=', value | key, '=' | key
// message = { garbage, pair }, garbage
package logfmt
import (
"reflect"
"strconv"
"strings"
"time"
)
// Handler is the interface implemented by objects that accept logfmt
// key-value pairs. HandleLogfmt must copy the logfmt data if it
// wishes to retain the data after returning.
type Handler interface {
HandleLogfmt(key, val []byte) error
}
// The HandlerFunc type is an adapter to allow the use of ordinary functions as
// logfmt handlers. If f is a function with the appropriate signature,
// HandlerFunc(f) is a Handler object that calls f.
type HandlerFunc func(key, val []byte) error
func (f HandlerFunc) HandleLogfmt(key, val []byte) error {
return f(key, val)
}
// Unmarshal parses the logfmt encoding data and stores the result in the value
// pointed to by v. If v is a Handler, HandleLogfmt will be called for each
// key-value pair.
//
// If v is not a Handler, it will pass v to NewStructHandler and use the
// returned StructHandler for decoding.
func Unmarshal(data []byte, v interface{}) (err error) {
h, ok := v.(Handler)
if !ok {
h, err = NewStructHandler(v)
if err != nil {
return err
}
}
return gotoScanner(data, h)
}
// StructHandler unmarshals logfmt into a struct. It matches incoming keys to
// the struct's fields (either the struct field name or its tag, preferring
// an exact match but also accepting a case-insensitive match).
//
// Field types supported by StructHandler are:
//
// all numeric types (e.g. float32, int, etc.)
// []byte
// string
// bool - true if key is present, false otherwise (the value is ignored).
// time.Duration - uses time.ParseDuration
//
// If a field is a pointer to an above type, and a matching key is not present
// in the logfmt data, the pointer will be untouched.
//
// If v is not a pointer to a Handler or struct, Unmarshal will return an
// error.
type StructHandler struct {
rv reflect.Value
}
func NewStructHandler(v interface{}) (Handler, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return nil, &InvalidUnmarshalError{reflect.TypeOf(v)}
}
return &StructHandler{rv: rv}, nil
}
func (h *StructHandler) HandleLogfmt(key, val []byte) error {
el := h.rv.Elem()
skey := string(key)
for i := 0; i < el.NumField(); i++ {
fv := el.Field(i)
ft := el.Type().Field(i)
switch {
case ft.Name == skey:
case ft.Tag.Get("logfmt") == skey:
case strings.EqualFold(ft.Name, skey):
default:
continue
}
if fv.Kind() == reflect.Ptr {
if fv.IsNil() {
t := fv.Type().Elem()
v := reflect.New(t)
fv.Set(v)
fv = v
}
fv = fv.Elem()
}
switch fv.Interface().(type) {
case time.Duration:
d, err := time.ParseDuration(string(val))
if err != nil {
return &UnmarshalTypeError{string(val), fv.Type()}
}
fv.Set(reflect.ValueOf(d))
case string:
fv.SetString(string(val))
case []byte:
b := make([]byte, len(val))
copy(b, val)
fv.SetBytes(b)
case bool:
fv.SetBool(true)
default:
switch {
case reflect.Int <= fv.Kind() && fv.Kind() <= reflect.Int64:
v, err := strconv.ParseInt(string(val), 10, 64)
if err != nil {
return err
}
fv.SetInt(v)
case reflect.Uint32 <= fv.Kind() && fv.Kind() <= reflect.Uint64:
v, err := strconv.ParseUint(string(val), 10, 64)
if err != nil {
return err
}
fv.SetUint(v)
case reflect.Float32 <= fv.Kind() && fv.Kind() <= reflect.Float64:
v, err := strconv.ParseFloat(string(val), 10)
if err != nil {
return err
}
fv.SetFloat(v)
default:
return &UnmarshalTypeError{string(val), fv.Type()}
}
}
}
return nil
}
// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
// (The argument to Unmarshal must be a non-nil pointer.)
type InvalidUnmarshalError struct {
Type reflect.Type
}
func (e *InvalidUnmarshalError) Error() string {
if e.Type == nil {
return "logfmt: Unmarshal(nil)"
}
if e.Type.Kind() != reflect.Ptr {
return "logfmt: Unmarshal(non-pointer " + e.Type.String() + ")"
}
return "logfmt: Unmarshal(nil " + e.Type.String() + ")"
}
// An UnmarshalTypeError describes a logfmt value that was
// not appropriate for a value of a specific Go type.
type UnmarshalTypeError struct {
Value string // the logfmt value
Type reflect.Type // type of Go value it could not be assigned to
}
func (e *UnmarshalTypeError) Error() string {
return "logfmt: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
}
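A hedged sketch of Unmarshal into a struct under the StructHandler rules above; the struct and input line are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/kr/logfmt"
)

type record struct {
	Msg     string
	Count   int
	Elapsed time.Duration
	Verbose bool // set to true because the bare key "verbose" is present
}

func main() {
	var r record
	line := []byte(`msg="hello kitty" count=3 elapsed=15ms verbose`)
	if err := logfmt.Unmarshal(line, &r); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", r)
	// {Msg:hello kitty Count:3 Elapsed:15ms Verbose:true}
}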

149
vendor/github.com/kr/logfmt/scanner.go generated vendored Normal file
View file

@@ -0,0 +1,149 @@
package logfmt
import (
"errors"
"fmt"
)
var ErrUnterminatedString = errors.New("logfmt: unterminated string")
func gotoScanner(data []byte, h Handler) (err error) {
saveError := func(e error) {
if err == nil {
err = e
}
}
var c byte
var i int
var m int
var key []byte
var val []byte
var ok bool
var esc bool
garbage:
if i == len(data) {
return
}
c = data[i]
switch {
case c > ' ' && c != '"' && c != '=':
key, val = nil, nil
m = i
i++
goto key
default:
i++
goto garbage
}
key:
if i >= len(data) {
if m >= 0 {
key = data[m:i]
saveError(h.HandleLogfmt(key, nil))
}
return
}
c = data[i]
switch {
case c > ' ' && c != '"' && c != '=':
i++
goto key
case c == '=':
key = data[m:i]
i++
goto equal
default:
key = data[m:i]
i++
saveError(h.HandleLogfmt(key, nil))
goto garbage
}
equal:
if i >= len(data) {
if m >= 0 {
i--
key = data[m:i]
saveError(h.HandleLogfmt(key, nil))
}
return
}
c = data[i]
switch {
case c > ' ' && c != '"' && c != '=':
m = i
i++
goto ivalue
case c == '"':
m = i
i++
esc = false
goto qvalue
default:
if key != nil {
saveError(h.HandleLogfmt(key, val))
}
i++
goto garbage
}
ivalue:
if i >= len(data) {
if m >= 0 {
val = data[m:i]
saveError(h.HandleLogfmt(key, val))
}
return
}
c = data[i]
switch {
case c > ' ' && c != '"' && c != '=':
i++
goto ivalue
default:
val = data[m:i]
saveError(h.HandleLogfmt(key, val))
i++
goto garbage
}
qvalue:
if i >= len(data) {
if m >= 0 {
saveError(ErrUnterminatedString)
}
return
}
c = data[i]
switch c {
case '\\':
i += 2
esc = true
goto qvalue
case '"':
i++
val = data[m:i]
if esc {
val, ok = unquoteBytes(val)
if !ok {
saveError(fmt.Errorf("logfmt: error unquoting bytes %q", string(val)))
goto garbage
}
} else {
val = val[1 : len(val)-1]
}
saveError(h.HandleLogfmt(key, val))
goto garbage
default:
i++
goto qvalue
}
}

149
vendor/github.com/kr/logfmt/unquote.go generated vendored Normal file
View file

@@ -0,0 +1,149 @@
package logfmt
import (
"strconv"
"unicode"
"unicode/utf16"
"unicode/utf8"
)
// Taken from Go's encoding/json
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
// or it returns -1.
func getu4(s []byte) rune {
if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
return -1
}
r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
if err != nil {
return -1
}
return rune(r)
}
// unquote converts a quoted JSON string literal s into an actual string t.
// The rules are different from Go's, so we cannot use strconv.Unquote.
func unquote(s []byte) (t string, ok bool) {
s, ok = unquoteBytes(s)
t = string(s)
return
}
func unquoteBytes(s []byte) (t []byte, ok bool) {
if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
return
}
s = s[1 : len(s)-1]
// Check for unusual characters. If there are none,
// then no unquoting is needed, so return a slice of the
// original bytes.
r := 0
for r < len(s) {
c := s[r]
if c == '\\' || c == '"' || c < ' ' {
break
}
if c < utf8.RuneSelf {
r++
continue
}
rr, size := utf8.DecodeRune(s[r:])
if rr == utf8.RuneError && size == 1 {
break
}
r += size
}
if r == len(s) {
return s, true
}
b := make([]byte, len(s)+2*utf8.UTFMax)
w := copy(b, s[0:r])
for r < len(s) {
// Out of room? Can only happen if s is full of
// malformed UTF-8 and we're replacing each
// byte with RuneError.
if w >= len(b)-2*utf8.UTFMax {
nb := make([]byte, (len(b)+utf8.UTFMax)*2)
copy(nb, b[0:w])
b = nb
}
switch c := s[r]; {
case c == '\\':
r++
if r >= len(s) {
return
}
switch s[r] {
default:
return
case '"', '\\', '/', '\'':
b[w] = s[r]
r++
w++
case 'b':
b[w] = '\b'
r++
w++
case 'f':
b[w] = '\f'
r++
w++
case 'n':
b[w] = '\n'
r++
w++
case 'r':
b[w] = '\r'
r++
w++
case 't':
b[w] = '\t'
r++
w++
case 'u':
r--
rr := getu4(s[r:])
if rr < 0 {
return
}
r += 6
if utf16.IsSurrogate(rr) {
rr1 := getu4(s[r:])
if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
// A valid pair; consume.
r += 6
w += utf8.EncodeRune(b[w:], dec)
break
}
// Invalid surrogate; fall back to replacement rune.
rr = unicode.ReplacementChar
}
w += utf8.EncodeRune(b[w:], rr)
}
// Quote, control characters are invalid.
case c == '"', c < ' ':
return
// ASCII
case c < utf8.RuneSelf:
b[w] = c
r++
w++
// Coerce to well-formed UTF-8.
default:
rr, size := utf8.DecodeRune(s[r:])
r += size
w += utf8.EncodeRune(b[w:], rr)
}
}
return b[0:w], true
}

22
vendor/github.com/stvp/go-udp-testing/LICENSE generated vendored Normal file
View file

@@ -0,0 +1,22 @@
The MIT License (MIT)
Copyright (c) 2015 Stovepipe Studios, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

192
vendor/github.com/stvp/go-udp-testing/udp.go generated vendored Normal file
View file

@@ -0,0 +1,192 @@
// Package udp implements UDP test helpers. It lets you assert that certain
// strings must or must not be sent to a given local UDP listener.
package udp
import (
"net"
"runtime"
"strings"
"testing"
"time"
)
var (
addr *string
listener *net.UDPConn
Timeout time.Duration = time.Millisecond
)
type fn func()
// SetAddr sets the UDP port that will be listened on.
func SetAddr(a string) {
addr = &a
}
func start(t *testing.T) {
resAddr, err := net.ResolveUDPAddr("udp", *addr)
if err != nil {
t.Fatal(err)
}
listener, err = net.ListenUDP("udp", resAddr)
if err != nil {
t.Fatal(err)
}
}
func stop(t *testing.T) {
if err := listener.Close(); err != nil {
t.Fatal(err)
}
}
func getMessage(t *testing.T, body fn) string {
start(t)
defer stop(t)
result := make(chan string)
go func() {
message := make([]byte, 1024*32)
var bufLen int
for {
listener.SetReadDeadline(time.Now().Add(Timeout))
n, _, _ := listener.ReadFrom(message[bufLen:])
if n == 0 {
result <- string(message[0:bufLen])
break
} else {
bufLen += n
}
}
}()
body()
return <-result
}
func get(t *testing.T, match string, body fn) (got string, equals bool, contains bool) {
got = getMessage(t, body)
equals = got == match
contains = strings.Contains(got, match)
return got, equals, contains
}
func printLocation(t *testing.T) {
_, file, line, _ := runtime.Caller(2)
t.Errorf("At: %s:%d", file, line)
}
// ShouldReceiveOnly will fire a test error if the given function doesn't send
// exactly the given string over UDP.
func ShouldReceiveOnly(t *testing.T, expected string, body fn) {
got, equals, _ := get(t, expected, body)
if !equals {
printLocation(t)
t.Errorf("Expected: %#v", expected)
t.Errorf("But got: %#v", got)
}
}
// ShouldNotReceiveOnly will fire a test error if the given function sends
// exactly the given string over UDP.
func ShouldNotReceiveOnly(t *testing.T, notExpected string, body fn) {
_, equals, _ := get(t, notExpected, body)
if equals {
printLocation(t)
t.Errorf("Expected not to get: %#v", notExpected)
}
}
// ShouldReceive will fire a test error if the given function doesn't send the
// given string over UDP.
func ShouldReceive(t *testing.T, expected string, body fn) {
got, _, contains := get(t, expected, body)
if !contains {
printLocation(t)
t.Errorf("Expected to find: %#v", expected)
t.Errorf("But got: %#v", got)
}
}
// ShouldNotReceive will fire a test error if the given function sends the
// given string over UDP.
func ShouldNotReceive(t *testing.T, expected string, body fn) {
got, _, contains := get(t, expected, body)
if contains {
printLocation(t)
t.Errorf("Expected not to find: %#v", expected)
t.Errorf("But got: %#v", got)
}
}
// ShouldReceiveAll will fire a test error unless all of the given strings are
// sent over UDP.
func ShouldReceiveAll(t *testing.T, expected []string, body fn) {
got := getMessage(t, body)
failed := false
for _, str := range expected {
if !strings.Contains(got, str) {
if !failed {
printLocation(t)
failed = true
}
t.Errorf("Expected to find: %#v", str)
}
}
if failed {
t.Errorf("But got: %#v", got)
}
}
// ShouldNotReceiveAny will fire a test error if any of the given strings are
// sent over UDP.
func ShouldNotReceiveAny(t *testing.T, unexpected []string, body fn) {
got := getMessage(t, body)
failed := false
for _, str := range unexpected {
if strings.Contains(got, str) {
if !failed {
printLocation(t)
failed = true
}
t.Errorf("Expected not to find: %#v", str)
}
}
if failed {
t.Errorf("But got: %#v", got)
}
}
func ShouldReceiveAllAndNotReceiveAny(t *testing.T, expected []string, unexpected []string, body fn) {
got := getMessage(t, body)
failed := false
for _, str := range expected {
if !strings.Contains(got, str) {
if !failed {
printLocation(t)
failed = true
}
t.Errorf("Expected to find: %#v", str)
}
}
for _, str := range unexpected {
if strings.Contains(got, str) {
if !failed {
printLocation(t)
failed = true
}
t.Errorf("Expected not to find: %#v", str)
}
}
if failed {
t.Errorf("but got: %#v", got)
}
}
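Finally, a hedged sketch of how these UDP assertions are typically used in a metrics test; the package name, port, and metric string are illustrative, and the library is imported under its package name, udp:

package metrics_test

import (
	"testing"
	"time"

	udp "github.com/stvp/go-udp-testing"
)

func TestStatsdCounterIsSent(t *testing.T) {
	udp.SetAddr(":18125")
	udp.Timeout = 50 * time.Millisecond

	udp.ShouldReceive(t, "requests_total:1", func() {
		// Emit the metric under test here, e.g. flush a StatsD counter
		// to 127.0.0.1:18125 via WriteTo or SendLoop.
	})
}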