2017-01-12 13:34:54 +00:00
|
|
|
package middlewares
|
|
|
|
|
|
|
|
import (
|
|
|
|
"github.com/containous/traefik/types"
|
|
|
|
"github.com/go-kit/kit/metrics"
|
|
|
|
"github.com/go-kit/kit/metrics/prometheus"
|
|
|
|
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
|
|
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
|
|
|
"net/http"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// reqsName is the name of the Prometheus counter tracking the total
	// number of HTTP requests handled by Traefik.
	reqsName = "traefik_requests_total"
	// latencyName is the name of the Prometheus histogram tracking request
	// processing duration, in seconds.
	latencyName = "traefik_request_duration_seconds"
)
|
|
|
|
|
2017-01-17 17:14:13 +00:00
|
|
|
// Prometheus is an Implementation for Metrics that exposes prometheus metrics for the latency
// and the number of requests partitioned by status code and method.
type Prometheus struct {
	// reqsCounter counts processed requests, partitioned by status code and
	// method (see the "code" and "method" labels set up in NewPrometheus).
	reqsCounter metrics.Counter
	// latencyHistogram observes how long requests took to process, in
	// seconds, bucketed per the configured (or default) buckets.
	latencyHistogram metrics.Histogram
}
|
|
|
|
|
|
|
|
// getReqsCounter returns the counter used to track the number of processed
// requests.
func (p *Prometheus) getReqsCounter() metrics.Counter {
	return p.reqsCounter
}
|
|
|
|
|
|
|
|
// getLatencyHistogram returns the histogram used to observe request
// processing latency.
func (p *Prometheus) getLatencyHistogram() metrics.Histogram {
	return p.latencyHistogram
}
|
|
|
|
|
|
|
|
// NewPrometheus returns a new prometheus Metrics implementation.
|
|
|
|
func NewPrometheus(name string, config *types.Prometheus) *Prometheus {
|
|
|
|
var m Prometheus
|
2017-03-08 14:17:07 +00:00
|
|
|
|
|
|
|
cv := stdprometheus.NewCounterVec(
|
2017-01-12 13:34:54 +00:00
|
|
|
stdprometheus.CounterOpts{
|
|
|
|
Name: reqsName,
|
|
|
|
Help: "How many HTTP requests processed, partitioned by status code and method.",
|
|
|
|
ConstLabels: stdprometheus.Labels{"service": name},
|
|
|
|
},
|
|
|
|
[]string{"code", "method"},
|
|
|
|
)
|
|
|
|
|
2017-03-08 14:17:07 +00:00
|
|
|
err := stdprometheus.Register(cv)
|
|
|
|
if err != nil {
|
|
|
|
e, ok := err.(stdprometheus.AlreadyRegisteredError)
|
|
|
|
if !ok {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
m.reqsCounter = prometheus.NewCounter(e.ExistingCollector.(*stdprometheus.CounterVec))
|
|
|
|
} else {
|
|
|
|
m.reqsCounter = prometheus.NewCounter(cv)
|
|
|
|
}
|
|
|
|
|
2017-01-12 13:34:54 +00:00
|
|
|
var buckets []float64
|
|
|
|
if config.Buckets != nil {
|
|
|
|
buckets = config.Buckets
|
|
|
|
} else {
|
2017-01-17 17:14:13 +00:00
|
|
|
buckets = []float64{0.1, 0.3, 1.2, 5}
|
2017-01-12 13:34:54 +00:00
|
|
|
}
|
|
|
|
|
2017-03-08 14:17:07 +00:00
|
|
|
hv := stdprometheus.NewHistogramVec(
|
2017-01-12 13:34:54 +00:00
|
|
|
stdprometheus.HistogramOpts{
|
|
|
|
Name: latencyName,
|
2017-01-17 17:14:13 +00:00
|
|
|
Help: "How long it took to process the request.",
|
2017-01-12 13:34:54 +00:00
|
|
|
ConstLabels: stdprometheus.Labels{"service": name},
|
|
|
|
Buckets: buckets,
|
|
|
|
},
|
2017-01-17 17:14:13 +00:00
|
|
|
[]string{},
|
2017-01-12 13:34:54 +00:00
|
|
|
)
|
2017-03-08 14:17:07 +00:00
|
|
|
|
|
|
|
err = stdprometheus.Register(hv)
|
|
|
|
if err != nil {
|
|
|
|
e, ok := err.(stdprometheus.AlreadyRegisteredError)
|
|
|
|
if !ok {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
m.latencyHistogram = prometheus.NewHistogram(e.ExistingCollector.(*stdprometheus.HistogramVec))
|
|
|
|
} else {
|
|
|
|
m.latencyHistogram = prometheus.NewHistogram(hv)
|
|
|
|
}
|
|
|
|
|
2017-01-12 13:34:54 +00:00
|
|
|
return &m
|
|
|
|
}
|
|
|
|
|
|
|
|
// handler returns the http.Handler serving the prometheus metrics endpoint
// via promhttp (which exposes collectors registered on the default registry).
func (p *Prometheus) handler() http.Handler {
	return promhttp.Handler()
}
|