traefik/pkg/metrics/prometheus.go

package metrics

import (
	"context"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/containous/traefik/v2/pkg/config/dynamic"
	"github.com/containous/traefik/v2/pkg/log"
	"github.com/containous/traefik/v2/pkg/safe"
	"github.com/containous/traefik/v2/pkg/types"
	"github.com/go-kit/kit/metrics"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

const (
	// MetricNamePrefix prefix of all metric names.
	MetricNamePrefix = "traefik_"

	// server meta information.
	metricConfigPrefix             = MetricNamePrefix + "config_"
	configReloadsTotalName         = metricConfigPrefix + "reloads_total"
	configReloadsFailuresTotalName = metricConfigPrefix + "reloads_failure_total"
	configLastReloadSuccessName    = metricConfigPrefix + "last_reload_success"
	configLastReloadFailureName    = metricConfigPrefix + "last_reload_failure"

	// entry point.
	metricEntryPointPrefix     = MetricNamePrefix + "entrypoint_"
	entryPointReqsTotalName    = metricEntryPointPrefix + "requests_total"
	entryPointReqsTLSTotalName = metricEntryPointPrefix + "requests_tls_total"
	entryPointReqDurationName  = metricEntryPointPrefix + "request_duration_seconds"
	entryPointOpenConnsName    = metricEntryPointPrefix + "open_connections"

	// service level.

	// MetricServicePrefix prefix of all service metric names.
	MetricServicePrefix     = MetricNamePrefix + "service_"
	serviceReqsTotalName    = MetricServicePrefix + "requests_total"
	serviceReqsTLSTotalName = MetricServicePrefix + "requests_tls_total"
	serviceReqDurationName  = MetricServicePrefix + "request_duration_seconds"
	serviceOpenConnsName    = MetricServicePrefix + "open_connections"
	serviceRetriesTotalName = MetricServicePrefix + "retries_total"
	serviceServerUpName     = MetricServicePrefix + "server_up"
)

// promState holds all metric state internally and acts as the only Collector we register for Prometheus.
//
// This enables control to remove metrics that belong to outdated configuration.
// As an example why this is required, consider the case where Traefik learns about a new service.
// It populates the 'traefik_service_server_up' metric for it with a value of 1 (alive).
// When the service is undeployed, the metric is still present in the client library
// and will be returned on the metrics endpoint until Traefik is restarted.
//
// To solve this problem promState keeps track of Traefik's dynamic configuration.
// Metrics that "belong" to a dynamic configuration part like services or entryPoints
// are removed after they have been scraped at least once, when the corresponding object
// no longer exists.
var promState = newPrometheusState()

var promRegistry = stdprometheus.NewRegistry()
// PrometheusHandler exposes Prometheus routes.
func PrometheusHandler() http.Handler {
return promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{})
}
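
// A minimal wiring sketch (an assumption for illustration, not part of the
// original file): the handler can be mounted on any HTTP mux to expose the
// shared registry:
//
//	mux := http.NewServeMux()
//	mux.Handle("/metrics", PrometheusHandler())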

// RegisterPrometheus registers all Prometheus metrics.
// It must be called only once and failing to register the metrics will lead to a panic.
func RegisterPrometheus(ctx context.Context, config *types.Prometheus) Registry {
	standardRegistry := initStandardRegistry(config)

	// AlreadyRegisteredError is expected when the collector was registered before
	// and can be ignored; any other registration error is worth a warning.
	if err := promRegistry.Register(stdprometheus.NewProcessCollector(stdprometheus.ProcessCollectorOpts{})); err != nil {
		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
			log.FromContext(ctx).Warnf("Unable to register ProcessCollector: %v", err)
		}
	}

	if err := promRegistry.Register(stdprometheus.NewGoCollector()); err != nil {
		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
			log.FromContext(ctx).Warnf("Unable to register GoCollector: %v", err)
		}
	}

	if !registerPromState(ctx) {
		return nil
	}

	return standardRegistry
}
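
// A hedged usage sketch (illustrative only; the real call site lives elsewhere
// in Traefik): register once at startup and keep the returned Registry. A nil
// result means promState could not be registered and metrics are disabled.
//
//	registry := RegisterPrometheus(ctx, &types.Prometheus{
//		AddEntryPointsLabels: true,
//		AddServicesLabels:    true,
//	})
//	if registry == nil {
//		// Prometheus metrics are disabled.
//	}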

func initStandardRegistry(config *types.Prometheus) Registry {
	buckets := []float64{0.1, 0.3, 1.2, 5.0}
	if config.Buckets != nil {
		buckets = config.Buckets
	}

	safe.Go(func() {
		promState.ListenValueUpdates()
	})

	configReloads := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
		Name: configReloadsTotalName,
		Help: "Config reloads",
	}, []string{})
	configReloadsFailures := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
		Name: configReloadsFailuresTotalName,
		Help: "Config failure reloads",
	}, []string{})
	lastConfigReloadSuccess := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
		Name: configLastReloadSuccessName,
		Help: "Last config reload success",
	}, []string{})
	lastConfigReloadFailure := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
		Name: configLastReloadFailureName,
		Help: "Last config reload failure",
	}, []string{})

	promState.describers = []func(chan<- *stdprometheus.Desc){
		configReloads.cv.Describe,
		configReloadsFailures.cv.Describe,
		lastConfigReloadSuccess.gv.Describe,
		lastConfigReloadFailure.gv.Describe,
	}

	reg := &standardRegistry{
		epEnabled:                    config.AddEntryPointsLabels,
		svcEnabled:                   config.AddServicesLabels,
		configReloadsCounter:         configReloads,
		configReloadsFailureCounter:  configReloadsFailures,
		lastConfigReloadSuccessGauge: lastConfigReloadSuccess,
		lastConfigReloadFailureGauge: lastConfigReloadFailure,
	}

	if config.AddEntryPointsLabels {
		entryPointReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
			Name: entryPointReqsTotalName,
			Help: "How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "entrypoint"})
		entryPointReqsTLS := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
			Name: entryPointReqsTLSTotalName,
			Help: "How many HTTP requests with TLS processed on an entrypoint, partitioned by TLS version and TLS cipher.",
		}, []string{"tls_version", "tls_cipher", "entrypoint"})
		entryPointReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
			Name:    entryPointReqDurationName,
			Help:    "How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.",
			Buckets: buckets,
		}, []string{"code", "method", "protocol", "entrypoint"})
		entryPointOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
			Name: entryPointOpenConnsName,
			Help: "How many open connections exist on an entrypoint, partitioned by method and protocol.",
		}, []string{"method", "protocol", "entrypoint"})

		promState.describers = append(promState.describers, []func(chan<- *stdprometheus.Desc){
			entryPointReqs.cv.Describe,
			entryPointReqsTLS.cv.Describe,
			entryPointReqDurations.hv.Describe,
			entryPointOpenConns.gv.Describe,
		}...)

		reg.entryPointReqsCounter = entryPointReqs
		reg.entryPointReqsTLSCounter = entryPointReqsTLS
		reg.entryPointReqDurationHistogram, _ = NewHistogramWithScale(entryPointReqDurations, time.Second)
		reg.entryPointOpenConnsGauge = entryPointOpenConns
	}

	if config.AddServicesLabels {
		serviceReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
			Name: serviceReqsTotalName,
			Help: "How many HTTP requests processed on a service, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "service"})
		serviceReqsTLS := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
			Name: serviceReqsTLSTotalName,
			Help: "How many HTTP requests with TLS processed on a service, partitioned by TLS version and TLS cipher.",
		}, []string{"tls_version", "tls_cipher", "service"})
		serviceReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
			Name:    serviceReqDurationName,
			Help:    "How long it took to process the request on a service, partitioned by status code, protocol, and method.",
			Buckets: buckets,
		}, []string{"code", "method", "protocol", "service"})
		serviceOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
			Name: serviceOpenConnsName,
			Help: "How many open connections exist on a service, partitioned by method and protocol.",
		}, []string{"method", "protocol", "service"})
		serviceRetries := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
			Name: serviceRetriesTotalName,
			Help: "How many request retries happened on a service.",
		}, []string{"service"})
		serviceServerUp := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
			Name: serviceServerUpName,
			Help: "service server is up, described by gauge value of 0 or 1.",
		}, []string{"service", "url"})

		promState.describers = append(promState.describers, []func(chan<- *stdprometheus.Desc){
			serviceReqs.cv.Describe,
			serviceReqsTLS.cv.Describe,
			serviceReqDurations.hv.Describe,
			serviceOpenConns.gv.Describe,
			serviceRetries.cv.Describe,
			serviceServerUp.gv.Describe,
		}...)

		reg.serviceReqsCounter = serviceReqs
		reg.serviceReqsTLSCounter = serviceReqsTLS
		reg.serviceReqDurationHistogram, _ = NewHistogramWithScale(serviceReqDurations, time.Second)
		reg.serviceOpenConnsGauge = serviceOpenConns
		reg.serviceRetriesCounter = serviceRetries
		reg.serviceServerUpGauge = serviceServerUp
	}

	return reg
}
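
// For illustration (bucket values are assumed, not from the original source):
// custom histogram buckets from the static configuration replace the defaults
// above when provided:
//
//	registry := initStandardRegistry(&types.Prometheus{
//		Buckets:           []float64{0.005, 0.05, 0.5, 5},
//		AddServicesLabels: true,
//	})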

func registerPromState(ctx context.Context) bool {
	if err := promRegistry.Register(promState); err != nil {
		logger := log.FromContext(ctx)
		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
			logger.Errorf("Unable to register Traefik to Prometheus: %v", err)
			return false
		}
		logger.Debug("Prometheus collector already registered.")
	}
	return true
}

// OnConfigurationUpdate receives the current configuration from Traefik.
// It then converts the configuration to the optimized package internal format
// and stores it in promState.
func OnConfigurationUpdate(conf dynamic.Configuration, entryPoints []string) {
	dynamicConfig := newDynamicConfig()

	for _, value := range entryPoints {
		dynamicConfig.entryPoints[value] = true
	}

	for name := range conf.HTTP.Routers {
		dynamicConfig.routers[name] = true
	}

	for serviceName, service := range conf.HTTP.Services {
		dynamicConfig.services[serviceName] = make(map[string]bool)
		if service.LoadBalancer != nil {
			for _, server := range service.LoadBalancer.Servers {
				dynamicConfig.services[serviceName][server.URL] = true
			}
		}
	}

	promState.SetDynamicConfig(dynamicConfig)
}
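
// For illustration (names and URLs are assumed): a configuration with one
// service "svc@docker" backed by two servers yields this lookup structure,
// which isOutdated later consults in O(1) per label:
//
//	dynamicConfig.services = map[string]map[string]bool{
//		"svc@docker": {
//			"http://10.0.0.1:80": true,
//			"http://10.0.0.2:80": true,
//		},
//	}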

func newPrometheusState() *prometheusState {
	return &prometheusState{
		collectors:    make(chan *collector),
		dynamicConfig: newDynamicConfig(),
		state:         make(map[string]*collector),
	}
}

type prometheusState struct {
	collectors    chan *collector
	describers    []func(ch chan<- *stdprometheus.Desc)
	mtx           sync.Mutex
	dynamicConfig *dynamicConfig
	state         map[string]*collector
}

func (ps *prometheusState) SetDynamicConfig(dynamicConfig *dynamicConfig) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.dynamicConfig = dynamicConfig
}
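
// ListenValueUpdates consumes the collectors channel and records the most
// recent collector for each metric ID, so that Collect can later export
// (and eventually expire) every tracked metric.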
func (ps *prometheusState) ListenValueUpdates() {
for collector := range ps.collectors {
ps.mtx.Lock()
ps.state[collector.id] = collector
ps.mtx.Unlock()
}
}
// Describe implements prometheus.Collector and simply calls
// the registered describer functions.
func (ps *prometheusState) Describe(ch chan<- *stdprometheus.Desc) {
for _, desc := range ps.describers {
desc(ch)
}
}

// Collect implements prometheus.Collector. It calls the Collect
// method of all metrics it received on the collectors channel.
// It's also responsible for removing metrics that belong to an outdated configuration.
// The removal happens only after their Collect method was called to ensure that
// those metrics are still exported on the current scrape.
func (ps *prometheusState) Collect(ch chan<- stdprometheus.Metric) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	var outdatedKeys []string
	for key, cs := range ps.state {
		cs.collector.Collect(ch)

		if ps.isOutdated(cs) {
			outdatedKeys = append(outdatedKeys, key)
		}
	}

	for _, key := range outdatedKeys {
		ps.state[key].delete()
		delete(ps.state, key)
	}
}

// isOutdated checks whether the passed collector has labels that mark
// it as belonging to an outdated configuration of Traefik.
func (ps *prometheusState) isOutdated(collector *collector) bool {
	labels := collector.labels

	if entrypointName, ok := labels["entrypoint"]; ok && !ps.dynamicConfig.hasEntryPoint(entrypointName) {
		return true
	}

	if serviceName, ok := labels["service"]; ok {
		if !ps.dynamicConfig.hasService(serviceName) {
			return true
		}
		if url, ok := labels["url"]; ok && !ps.dynamicConfig.hasServerURL(serviceName, url) {
			return true
		}
	}
	return false
}

func newDynamicConfig() *dynamicConfig {
	return &dynamicConfig{
		entryPoints: make(map[string]bool),
		routers:     make(map[string]bool),
		services:    make(map[string]map[string]bool),
	}
}

// dynamicConfig holds the current configuration for entryPoints, services,
// and server URLs in an optimized way to check for existence. This provides
// a performant way to check whether the collected metrics belong to the
// current configuration or to an outdated one.
type dynamicConfig struct {
	entryPoints map[string]bool
	routers     map[string]bool
	services    map[string]map[string]bool
}

func (d *dynamicConfig) hasEntryPoint(entrypointName string) bool {
	_, ok := d.entryPoints[entrypointName]
	return ok
}

func (d *dynamicConfig) hasService(serviceName string) bool {
	_, ok := d.services[serviceName]
	return ok
}

func (d *dynamicConfig) hasServerURL(serviceName, serverURL string) bool {
	if service, hasService := d.services[serviceName]; hasService {
		_, ok := service[serverURL]
		return ok
	}
	return false
}

func newCollector(metricName string, labels stdprometheus.Labels, c stdprometheus.Collector, delete func()) *collector {
	return &collector{
		id:        buildMetricID(metricName, labels),
		labels:    labels,
		collector: c,
		delete:    delete,
	}
}

// collector wraps a Collector object from the Prometheus client library.
// It adds information on how many generations this metric should be present
// in the /metrics output, relative to the time it was last tracked.
type collector struct {
	id        string
	labels    stdprometheus.Labels
	collector stdprometheus.Collector
	delete    func()
}

func buildMetricID(metricName string, labels stdprometheus.Labels) string {
	var labelNamesValues []string
	for name, value := range labels {
		labelNamesValues = append(labelNamesValues, name, value)
	}
	sort.Strings(labelNamesValues)
	return metricName + ":" + strings.Join(labelNamesValues, "|")
}
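
// For illustration (example values assumed): because sort.Strings sorts the
// flattened name/value slice as a whole, names and values end up interleaved
// in the ID, but the result is deterministic for a given label set, which is
// all that is needed for a stable map key:
//
//	buildMetricID("traefik_service_requests_total",
//		stdprometheus.Labels{"code": "200", "service": "svc"})
//	// => "traefik_service_requests_total:200|code|service|svc"
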
func newCounterFrom(collectors chan<- *collector, opts stdprometheus.CounterOpts, labelNames []string) *counter {
cv := stdprometheus.NewCounterVec(opts, labelNames)
c := &counter{
name: opts.Name,
cv: cv,
collectors: collectors,
}
if len(labelNames) == 0 {
c.Add(0)
}
return c
}
type counter struct {
name string
cv *stdprometheus.CounterVec
labelNamesValues labelNamesValues
collectors chan<- *collector
}
func (c *counter) With(labelValues ...string) metrics.Counter {
return &counter{
name: c.name,
cv: c.cv,
labelNamesValues: c.labelNamesValues.With(labelValues...),
collectors: c.collectors,
}
}
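
// A minimal usage sketch (assumed, for illustration only) of the go-kit style
// With/Add chaining; each Add also publishes the live collector on the
// collectors channel so promState can track and later expire it:
//
//	c := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
//		Name: serviceReqsTotalName,
//		Help: "requests",
//	}, []string{"code", "method", "protocol", "service"})
//	c.With("code", "200", "method", "GET", "protocol", "http", "service", "svc").Add(1)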

func (c *counter) Add(delta float64) {
	labels := c.labelNamesValues.ToLabels()
	collector := c.cv.With(labels)
	collector.Add(delta)
	c.collectors <- newCollector(c.name, labels, collector, func() {
		c.cv.Delete(labels)
	})
}
func (c *counter) Describe(ch chan<- *stdprometheus.Desc) {
c.cv.Describe(ch)
}
func newGaugeFrom(collectors chan<- *collector, opts stdprometheus.GaugeOpts, labelNames []string) *gauge {
gv := stdprometheus.NewGaugeVec(opts, labelNames)
g := &gauge{
name: opts.Name,
gv: gv,
collectors: collectors,
}
if len(labelNames) == 0 {
g.Set(0)
}
return g
}
type gauge struct {
name string
gv *stdprometheus.GaugeVec
labelNamesValues labelNamesValues
collectors chan<- *collector
}
func (g *gauge) With(labelValues ...string) metrics.Gauge {
return &gauge{
name: g.name,
gv: g.gv,
labelNamesValues: g.labelNamesValues.With(labelValues...),
collectors: g.collectors,
}
}

func (g *gauge) Add(delta float64) {
	labels := g.labelNamesValues.ToLabels()
	collector := g.gv.With(labels)
	collector.Add(delta)
	g.collectors <- newCollector(g.name, labels, collector, func() {
		g.gv.Delete(labels)
	})
}

func (g *gauge) Set(value float64) {
	labels := g.labelNamesValues.ToLabels()
	collector := g.gv.With(labels)
	collector.Set(value)
	g.collectors <- newCollector(g.name, labels, collector, func() {
		g.gv.Delete(labels)
	})
}
func (g *gauge) Describe(ch chan<- *stdprometheus.Desc) {
g.gv.Describe(ch)
}
func newHistogramFrom(collectors chan<- *collector, opts stdprometheus.HistogramOpts, labelNames []string) *histogram {
hv := stdprometheus.NewHistogramVec(opts, labelNames)
return &histogram{
name: opts.Name,
hv: hv,
collectors: collectors,
}
}
type histogram struct {
name string
hv *stdprometheus.HistogramVec
labelNamesValues labelNamesValues
collectors chan<- *collector
}
func (h *histogram) With(labelValues ...string) metrics.Histogram {
return &histogram{
name: h.name,
hv: h.hv,
labelNamesValues: h.labelNamesValues.With(labelValues...),
collectors: h.collectors,
}
}

func (h *histogram) Observe(value float64) {
	labels := h.labelNamesValues.ToLabels()
	observer := h.hv.With(labels)
	observer.Observe(value)

	// Do a type assertion to be sure that prometheus will be able to call the Collect method.
	if collector, ok := observer.(stdprometheus.Histogram); ok {
		h.collectors <- newCollector(h.name, labels, collector, func() {
			h.hv.Delete(labels)
		})
	}
}
func (h *histogram) Describe(ch chan<- *stdprometheus.Desc) {
h.hv.Describe(ch)
}

// labelNamesValues is a named slice type that provides validation on its
// With method. Metrics may include it as a member to help them satisfy With
// semantics and save some code duplication.
type labelNamesValues []string
// With validates the input, and returns a new aggregate labelNamesValues.
func (lvs labelNamesValues) With(labelValues ...string) labelNamesValues {
if len(labelValues)%2 != 0 {
labelValues = append(labelValues, "unknown")
}
return append(lvs, labelValues...)
}
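
// For illustration (example values assumed): an odd number of label values is
// padded with "unknown" rather than causing a mismatched-cardinality error
// later in the Prometheus client:
//
//	labelNamesValues{}.With("code", "200", "method")
//	// => labelNamesValues{"code", "200", "method", "unknown"}
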
// ToLabels is a convenience method to convert a labelNamesValues
// to the native prometheus.Labels.
func (lvs labelNamesValues) ToLabels() stdprometheus.Labels {
labels := stdprometheus.Labels{}
for i := 0; i < len(lvs); i += 2 {
labels[lvs[i]] = lvs[i+1]
}
return labels
}