traefik/pkg/metrics/prometheus.go

package metrics

import (
	"context"
	"errors"
	"net/http"
	"sync"
	"time"

	"github.com/go-kit/kit/metrics"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/zerolog/log"
	"github.com/traefik/traefik/v3/pkg/config/dynamic"
	"github.com/traefik/traefik/v3/pkg/types"
)

const (
	// MetricNamePrefix prefix of all metric names.
	MetricNamePrefix = "traefik_"

	// server meta information.
	metricConfigPrefix          = MetricNamePrefix + "config_"
	configReloadsTotalName      = metricConfigPrefix + "reloads_total"
	configLastReloadSuccessName = metricConfigPrefix + "last_reload_success"
	openConnectionsName         = MetricNamePrefix + "open_connections"

	// TLS.
	metricsTLSPrefix              = MetricNamePrefix + "tls_"
	tlsCertsNotAfterTimestampName = metricsTLSPrefix + "certs_not_after"

	// entry point.
	metricEntryPointPrefix        = MetricNamePrefix + "entrypoint_"
	entryPointReqsTotalName       = metricEntryPointPrefix + "requests_total"
	entryPointReqsTLSTotalName    = metricEntryPointPrefix + "requests_tls_total"
	entryPointReqDurationName     = metricEntryPointPrefix + "request_duration_seconds"
	entryPointReqsBytesTotalName  = metricEntryPointPrefix + "requests_bytes_total"
	entryPointRespsBytesTotalName = metricEntryPointPrefix + "responses_bytes_total"

	// router level.
	metricRouterPrefix        = MetricNamePrefix + "router_"
	routerReqsTotalName       = metricRouterPrefix + "requests_total"
	routerReqsTLSTotalName    = metricRouterPrefix + "requests_tls_total"
	routerReqDurationName     = metricRouterPrefix + "request_duration_seconds"
	routerReqsBytesTotalName  = metricRouterPrefix + "requests_bytes_total"
	routerRespsBytesTotalName = metricRouterPrefix + "responses_bytes_total"

	// service level.
	metricServicePrefix        = MetricNamePrefix + "service_"
	serviceReqsTotalName       = metricServicePrefix + "requests_total"
	serviceReqsTLSTotalName    = metricServicePrefix + "requests_tls_total"
	serviceReqDurationName     = metricServicePrefix + "request_duration_seconds"
	serviceRetriesTotalName    = metricServicePrefix + "retries_total"
	serviceServerUpName        = metricServicePrefix + "server_up"
	serviceReqsBytesTotalName  = metricServicePrefix + "requests_bytes_total"
	serviceRespsBytesTotalName = metricServicePrefix + "responses_bytes_total"
)

// promState holds all metric state internally and acts as the only Collector we register for Prometheus.
//
// This enables control to remove metrics that belong to outdated configuration.
// As an example of why this is required, consider the case where Traefik learns about a new service.
// It populates the 'traefik_service_server_up' metric for it with a value of 1 (alive).
// When the service is undeployed, the metric is still present in the client library
// and would keep being returned on the metrics endpoint until Traefik is restarted.
//
// To solve this problem promState keeps track of Traefik's dynamic configuration.
// Metrics that "belong" to a dynamic configuration part like services or entryPoints
// are removed, after they have been scraped at least once, when the corresponding object
// no longer exists.
var promState = newPrometheusState()

var promRegistry = stdprometheus.NewRegistry()

// PrometheusHandler exposes Prometheus routes.
func PrometheusHandler() http.Handler {
	return promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{})
}
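
// A minimal sketch (not part of the original file) of how the handler could be
// mounted on a plain net/http mux; the route and port are illustrative only:
//
//	mux := http.NewServeMux()
//	mux.Handle("/metrics", PrometheusHandler())
//	_ = http.ListenAndServe(":8082", mux) // error handling elided in this sketch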

// RegisterPrometheus registers all Prometheus metrics.
// It must be called only once, and failing to register the metrics will lead to a panic.
func RegisterPrometheus(ctx context.Context, config *types.Prometheus) Registry {
	standardRegistry := initStandardRegistry(config)

	if err := promRegistry.Register(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})); err != nil {
		var arErr stdprometheus.AlreadyRegisteredError
		if !errors.As(err, &arErr) {
			log.Ctx(ctx).Warn().Err(err).Msg("Unable to register ProcessCollector")
		}
	}

	if err := promRegistry.Register(collectors.NewGoCollector()); err != nil {
		var arErr stdprometheus.AlreadyRegisteredError
		if !errors.As(err, &arErr) {
			log.Ctx(ctx).Warn().Err(err).Msg("Unable to register GoCollector")
		}
	}

	if !registerPromState(ctx) {
		return nil
	}

	return standardRegistry
}
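
// A hedged usage sketch (illustrative, not from the original file): wiring the
// registry from static configuration and recording a config reload. The
// ConfigReloadsCounter accessor is assumed from this package's Registry interface:
//
//	registry := RegisterPrometheus(ctx, &types.Prometheus{AddEntryPointsLabels: true})
//	if registry != nil {
//		registry.ConfigReloadsCounter().Add(1)
//	}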

func initStandardRegistry(config *types.Prometheus) Registry {
	buckets := []float64{0.1, 0.3, 1.2, 5.0}
	if config.Buckets != nil {
		buckets = config.Buckets
	}

	configReloads := newCounterFrom(stdprometheus.CounterOpts{
		Name: configReloadsTotalName,
		Help: "Config reloads",
	}, []string{})
	lastConfigReloadSuccess := newGaugeFrom(stdprometheus.GaugeOpts{
		Name: configLastReloadSuccessName,
		Help: "Last config reload success",
	}, []string{})
	tlsCertsNotAfterTimestamp := newGaugeFrom(stdprometheus.GaugeOpts{
		Name: tlsCertsNotAfterTimestampName,
		Help: "Certificate expiration timestamp",
	}, []string{"cn", "serial", "sans"})
	openConnections := newGaugeFrom(stdprometheus.GaugeOpts{
		Name: openConnectionsName,
		Help: "How many open connections exist, by entryPoint and protocol",
	}, []string{"entrypoint", "protocol"})

	promState.vectors = []vector{
		configReloads.cv,
		lastConfigReloadSuccess.gv,
		tlsCertsNotAfterTimestamp.gv,
		openConnections.gv,
	}

	reg := &standardRegistry{
		epEnabled:                      config.AddEntryPointsLabels,
		routerEnabled:                  config.AddRoutersLabels,
		svcEnabled:                     config.AddServicesLabels,
		configReloadsCounter:           configReloads,
		lastConfigReloadSuccessGauge:   lastConfigReloadSuccess,
		tlsCertsNotAfterTimestampGauge: tlsCertsNotAfterTimestamp,
		openConnectionsGauge:           openConnections,
	}

	if config.AddEntryPointsLabels {
		entryPointReqs := newCounterWithHeadersFrom(stdprometheus.CounterOpts{
			Name: entryPointReqsTotalName,
			Help: "How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.",
		}, config.HeaderLabels, []string{"code", "method", "protocol", "entrypoint"})
		entryPointReqsTLS := newCounterFrom(stdprometheus.CounterOpts{
			Name: entryPointReqsTLSTotalName,
			Help: "How many HTTP requests with TLS processed on an entrypoint, partitioned by TLS version and TLS cipher used.",
		}, []string{"tls_version", "tls_cipher", "entrypoint"})
		entryPointReqDurations := newHistogramFrom(stdprometheus.HistogramOpts{
			Name:    entryPointReqDurationName,
			Help:    "How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.",
			Buckets: buckets,
		}, []string{"code", "method", "protocol", "entrypoint"})
		entryPointReqsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: entryPointReqsBytesTotalName,
			Help: "The total size of requests in bytes handled by an entrypoint, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "entrypoint"})
		entryPointRespsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: entryPointRespsBytesTotalName,
			Help: "The total size of responses in bytes handled by an entrypoint, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "entrypoint"})

		promState.vectors = append(promState.vectors,
			entryPointReqs.cv,
			entryPointReqsTLS.cv,
			entryPointReqDurations.hv,
			entryPointReqsBytesTotal.cv,
			entryPointRespsBytesTotal.cv,
		)

		reg.entryPointReqsCounter = entryPointReqs
		reg.entryPointReqsTLSCounter = entryPointReqsTLS
		reg.entryPointReqDurationHistogram, _ = NewHistogramWithScale(entryPointReqDurations, time.Second)
		reg.entryPointReqsBytesCounter = entryPointReqsBytesTotal
		reg.entryPointRespsBytesCounter = entryPointRespsBytesTotal
	}

	if config.AddRoutersLabels {
		routerReqs := newCounterWithHeadersFrom(stdprometheus.CounterOpts{
			Name: routerReqsTotalName,
			Help: "How many HTTP requests are processed on a router, partitioned by service, status code, protocol, and method.",
		}, config.HeaderLabels, []string{"code", "method", "protocol", "router", "service"})
		routerReqsTLS := newCounterFrom(stdprometheus.CounterOpts{
			Name: routerReqsTLSTotalName,
			Help: "How many HTTP requests with TLS are processed on a router, partitioned by service, TLS version, and TLS cipher used.",
		}, []string{"tls_version", "tls_cipher", "router", "service"})
		routerReqDurations := newHistogramFrom(stdprometheus.HistogramOpts{
			Name:    routerReqDurationName,
			Help:    "How long it took to process the request on a router, partitioned by service, status code, protocol, and method.",
			Buckets: buckets,
		}, []string{"code", "method", "protocol", "router", "service"})
		routerReqsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: routerReqsBytesTotalName,
			Help: "The total size of requests in bytes handled by a router, partitioned by service, status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "router", "service"})
		routerRespsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: routerRespsBytesTotalName,
			Help: "The total size of responses in bytes handled by a router, partitioned by service, status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "router", "service"})

		promState.vectors = append(promState.vectors,
			routerReqs.cv,
			routerReqsTLS.cv,
			routerReqDurations.hv,
			routerReqsBytesTotal.cv,
			routerRespsBytesTotal.cv,
		)

		reg.routerReqsCounter = routerReqs
		reg.routerReqsTLSCounter = routerReqsTLS
		reg.routerReqDurationHistogram, _ = NewHistogramWithScale(routerReqDurations, time.Second)
		reg.routerReqsBytesCounter = routerReqsBytesTotal
		reg.routerRespsBytesCounter = routerRespsBytesTotal
	}

	if config.AddServicesLabels {
		serviceReqs := newCounterWithHeadersFrom(stdprometheus.CounterOpts{
			Name: serviceReqsTotalName,
			Help: "How many HTTP requests processed on a service, partitioned by status code, protocol, and method.",
		}, config.HeaderLabels, []string{"code", "method", "protocol", "service"})
		serviceReqsTLS := newCounterFrom(stdprometheus.CounterOpts{
			Name: serviceReqsTLSTotalName,
			Help: "How many HTTP requests with TLS processed on a service, partitioned by TLS version and TLS cipher.",
		}, []string{"tls_version", "tls_cipher", "service"})
		serviceReqDurations := newHistogramFrom(stdprometheus.HistogramOpts{
			Name:    serviceReqDurationName,
			Help:    "How long it took to process the request on a service, partitioned by status code, protocol, and method.",
			Buckets: buckets,
		}, []string{"code", "method", "protocol", "service"})
		serviceRetries := newCounterFrom(stdprometheus.CounterOpts{
			Name: serviceRetriesTotalName,
			Help: "How many request retries happened on a service.",
		}, []string{"service"})
		serviceServerUp := newGaugeFrom(stdprometheus.GaugeOpts{
			Name: serviceServerUpName,
			Help: "service server is up, described by gauge value of 0 or 1.",
		}, []string{"service", "url"})
		serviceReqsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: serviceReqsBytesTotalName,
			Help: "The total size of requests in bytes received by a service, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "service"})
		serviceRespsBytesTotal := newCounterFrom(stdprometheus.CounterOpts{
			Name: serviceRespsBytesTotalName,
			Help: "The total size of responses in bytes returned by a service, partitioned by status code, protocol, and method.",
		}, []string{"code", "method", "protocol", "service"})

		promState.vectors = append(promState.vectors,
			serviceReqs.cv,
			serviceReqsTLS.cv,
			serviceReqDurations.hv,
			serviceRetries.cv,
			serviceServerUp.gv,
			serviceReqsBytesTotal.cv,
			serviceRespsBytesTotal.cv,
		)

		reg.serviceReqsCounter = serviceReqs
		reg.serviceReqsTLSCounter = serviceReqsTLS
		reg.serviceReqDurationHistogram, _ = NewHistogramWithScale(serviceReqDurations, time.Second)
		reg.serviceRetriesCounter = serviceRetries
		reg.serviceServerUpGauge = serviceServerUp
		reg.serviceReqsBytesCounter = serviceReqsBytesTotal
		reg.serviceRespsBytesCounter = serviceRespsBytesTotal
	}

	return reg
}
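
// For orientation (derived from the name constants above, not from the original
// file): with AddServicesLabels enabled, the endpoint exposes series such as the
// following; the service name format is illustrative:
//
//	traefik_service_requests_total{code="200",method="GET",protocol="http",service="my-service@docker"} 42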

func registerPromState(ctx context.Context) bool {
	err := promRegistry.Register(promState)
	if err == nil {
		return true
	}

	logger := log.Ctx(ctx)

	var arErr stdprometheus.AlreadyRegisteredError
	if errors.As(err, &arErr) {
		logger.Debug().Msg("Prometheus collector already registered.")
		return true
	}

	logger.Error().Err(err).Msg("Unable to register Traefik to Prometheus")
	return false
}

// OnConfigurationUpdate receives the current configuration from Traefik.
// It then converts the configuration to the optimized package-internal format
// and sets it on the promState.
func OnConfigurationUpdate(conf dynamic.Configuration, entryPoints []string) {
	dynCfg := newDynamicConfig()

	for _, value := range entryPoints {
		dynCfg.entryPoints[value] = true
	}

	if conf.HTTP == nil {
		promState.SetDynamicConfig(dynCfg)
		return
	}

	for name := range conf.HTTP.Routers {
		dynCfg.routers[name] = true
	}

	for serviceName, service := range conf.HTTP.Services {
		dynCfg.services[serviceName] = make(map[string]bool)
		if service.LoadBalancer != nil {
			for _, server := range service.LoadBalancer.Servers {
				dynCfg.services[serviceName][server.URL] = true
			}
		}
	}

	promState.SetDynamicConfig(dynCfg)
}
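
// An illustrative sketch (not from the original file) of feeding a new dynamic
// configuration into the metrics state; the router, service, and entry point
// names are made up:
//
//	OnConfigurationUpdate(dynamic.Configuration{
//		HTTP: &dynamic.HTTPConfiguration{
//			Routers:  map[string]*dynamic.Router{"my-router": {}},
//			Services: map[string]*dynamic.Service{"my-service": {LoadBalancer: &dynamic.ServersLoadBalancer{}}},
//		},
//	}, []string{"web"})
//
// Metrics labeled with entities absent from this configuration are dropped
// after the next scrape.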

func newPrometheusState() *prometheusState {
	return &prometheusState{
		dynamicConfig: newDynamicConfig(),
		deletedURLs:   make(map[string][]string),
	}
}

type vector interface {
	stdprometheus.Collector
	DeletePartialMatch(labels stdprometheus.Labels) int
}

type prometheusState struct {
	vectors []vector

	mtx             sync.Mutex
	dynamicConfig   *dynamicConfig
	deletedEP       []string
	deletedRouters  []string
	deletedServices []string
	deletedURLs     map[string][]string
}

func (ps *prometheusState) SetDynamicConfig(dynamicConfig *dynamicConfig) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	for ep := range ps.dynamicConfig.entryPoints {
		if _, ok := dynamicConfig.entryPoints[ep]; !ok {
			ps.deletedEP = append(ps.deletedEP, ep)
		}
	}

	for router := range ps.dynamicConfig.routers {
		if _, ok := dynamicConfig.routers[router]; !ok {
			ps.deletedRouters = append(ps.deletedRouters, router)
		}
	}

	for service, serV := range ps.dynamicConfig.services {
		actualService, ok := dynamicConfig.services[service]
		if !ok {
			ps.deletedServices = append(ps.deletedServices, service)
		}
		for url := range serV {
			if _, ok := actualService[url]; !ok {
				ps.deletedURLs[service] = append(ps.deletedURLs[service], url)
			}
		}
	}

	ps.dynamicConfig = dynamicConfig
}

// Describe implements prometheus.Collector and delegates to the Describe
// method of every registered vector.
func (ps *prometheusState) Describe(ch chan<- *stdprometheus.Desc) {
	for _, v := range ps.vectors {
		v.Describe(ch)
	}
}

// Collect implements prometheus.Collector. It calls the Collect
// method of every registered vector.
// It is also responsible for removing metrics that belong to an outdated configuration.
// The removal happens only after the vectors' Collect method has been called, to ensure
// that those metrics are still exported on the current scrape.
func (ps *prometheusState) Collect(ch chan<- stdprometheus.Metric) {
	for _, v := range ps.vectors {
		v.Collect(ch)
	}

	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	for _, ep := range ps.deletedEP {
		if !ps.dynamicConfig.hasEntryPoint(ep) {
			ps.DeletePartialMatch(map[string]string{"entrypoint": ep})
		}
	}

	for _, router := range ps.deletedRouters {
		if !ps.dynamicConfig.hasRouter(router) {
			ps.DeletePartialMatch(map[string]string{"router": router})
		}
	}

	for _, service := range ps.deletedServices {
		if !ps.dynamicConfig.hasService(service) {
			ps.DeletePartialMatch(map[string]string{"service": service})
		}
	}

	for service, urls := range ps.deletedURLs {
		for _, url := range urls {
			if !ps.dynamicConfig.hasServerURL(service, url) {
				ps.DeletePartialMatch(map[string]string{"service": service, "url": url})
			}
		}
	}

	ps.deletedEP = nil
	ps.deletedRouters = nil
	ps.deletedServices = nil
	ps.deletedURLs = make(map[string][]string)
}

// DeletePartialMatch deletes all metrics whose variable labels contain all of the labels passed in.
// The order of the labels does not matter.
// It returns the number of metrics deleted.
func (ps *prometheusState) DeletePartialMatch(labels stdprometheus.Labels) int {
	var count int
	for _, elem := range ps.vectors {
		count += elem.DeletePartialMatch(labels)
	}
	return count
}
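
// A small illustrative example (not part of the original file): deleting every
// series that carries service="my-service", regardless of its other labels:
//
//	removed := promState.DeletePartialMatch(stdprometheus.Labels{"service": "my-service"})
//	_ = removed // number of series deleted across all registered vectors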

func newDynamicConfig() *dynamicConfig {
	return &dynamicConfig{
		entryPoints: make(map[string]bool),
		routers:     make(map[string]bool),
		services:    make(map[string]map[string]bool),
	}
}

// dynamicConfig holds the current configuration for entryPoints, routers,
// services, and server URLs in an optimized way to check for existence. This
// provides a performant way to check whether the collected metrics belong to
// the current configuration or to an outdated one.
type dynamicConfig struct {
	entryPoints map[string]bool
	routers     map[string]bool
	services    map[string]map[string]bool
}

func (d *dynamicConfig) hasEntryPoint(entrypointName string) bool {
	_, ok := d.entryPoints[entrypointName]
	return ok
}

func (d *dynamicConfig) hasService(serviceName string) bool {
	_, ok := d.services[serviceName]
	return ok
}

func (d *dynamicConfig) hasRouter(routerName string) bool {
	_, ok := d.routers[routerName]
	return ok
}

func (d *dynamicConfig) hasServerURL(serviceName, serverURL string) bool {
	if service, hasService := d.services[serviceName]; hasService {
		_, ok := service[serverURL]
		return ok
	}
	return false
}

func newCounterWithHeadersFrom(opts stdprometheus.CounterOpts, headers map[string]string, labelNames []string) *counterWithHeaders {
	var headerLabels []string
	for k := range headers {
		headerLabels = append(headerLabels, k)
	}

	cv := stdprometheus.NewCounterVec(opts, append(labelNames, headerLabels...))
	c := &counterWithHeaders{
		name:    opts.Name,
		headers: headers,
		cv:      cv,
	}
	if len(labelNames) == 0 && len(headerLabels) == 0 {
		c.collector = cv.WithLabelValues()
		c.Add(0)
	}
	return c
}

// counterWithHeaders is a counter that derives additional label values from
// the headers of the observed request (label name -> header key mapping).
type counterWithHeaders struct {
	name             string
	cv               *stdprometheus.CounterVec
	labelNamesValues labelNamesValues
	headers          map[string]string
	collector        stdprometheus.Counter
}

func (c *counterWithHeaders) With(headers http.Header, labelValues ...string) CounterWithHeaders {
	for headerLabel, headerKey := range c.headers {
		labelValues = append(labelValues, headerLabel, headers.Get(headerKey))
	}
	lnv := c.labelNamesValues.With(labelValues...)
	return &counterWithHeaders{
		name:             c.name,
		headers:          c.headers,
		cv:               c.cv,
		labelNamesValues: lnv,
		collector:        c.cv.With(lnv.ToLabels()),
	}
}

func (c *counterWithHeaders) Add(delta float64) {
	c.collector.Add(delta)
}

func (c *counterWithHeaders) Describe(ch chan<- *stdprometheus.Desc) {
	c.cv.Describe(ch)
}
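
// An illustrative sketch (names invented) of how header-derived labels work:
// a counter configured with the headers map {"useragent": "User-Agent"} attaches
// a useragent=<User-Agent header value> label on each observation:
//
//	c := newCounterWithHeadersFrom(stdprometheus.CounterOpts{Name: "example_requests_total"},
//		map[string]string{"useragent": "User-Agent"}, []string{"code"})
//	req, _ := http.NewRequest(http.MethodGet, "http://example.com", nil)
//	c.With(req.Header, "code", "200").Add(1)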

func newCounterFrom(opts stdprometheus.CounterOpts, labelNames []string) *counter {
	cv := stdprometheus.NewCounterVec(opts, labelNames)
	c := &counter{
		name: opts.Name,
		cv:   cv,
	}
	if len(labelNames) == 0 {
		c.collector = cv.WithLabelValues()
		c.Add(0)
	}
	return c
}

type counter struct {
	name             string
	cv               *stdprometheus.CounterVec
	labelNamesValues labelNamesValues
	collector        stdprometheus.Counter
}

func (c *counter) With(labelValues ...string) metrics.Counter {
	lnv := c.labelNamesValues.With(labelValues...)
	return &counter{
		name:             c.name,
		cv:               c.cv,
		labelNamesValues: lnv,
		collector:        c.cv.With(lnv.ToLabels()),
	}
}

func (c *counter) Add(delta float64) {
	c.collector.Add(delta)
}

func (c *counter) Describe(ch chan<- *stdprometheus.Desc) {
	c.cv.Describe(ch)
}

func newGaugeFrom(opts stdprometheus.GaugeOpts, labelNames []string) *gauge {
	gv := stdprometheus.NewGaugeVec(opts, labelNames)
	g := &gauge{
		name: opts.Name,
		gv:   gv,
	}
	if len(labelNames) == 0 {
		g.collector = gv.WithLabelValues()
		g.Set(0)
	}
	return g
}

type gauge struct {
	name             string
	gv               *stdprometheus.GaugeVec
	labelNamesValues labelNamesValues
	collector        stdprometheus.Gauge
}

func (g *gauge) With(labelValues ...string) metrics.Gauge {
	lnv := g.labelNamesValues.With(labelValues...)
	return &gauge{
		name:             g.name,
		gv:               g.gv,
		labelNamesValues: lnv,
		collector:        g.gv.With(lnv.ToLabels()),
	}
}

func (g *gauge) Add(delta float64) {
	g.collector.Add(delta)
}

func (g *gauge) Set(value float64) {
	g.collector.Set(value)
}

func (g *gauge) Describe(ch chan<- *stdprometheus.Desc) {
	g.gv.Describe(ch)
}

func newHistogramFrom(opts stdprometheus.HistogramOpts, labelNames []string) *histogram {
	hv := stdprometheus.NewHistogramVec(opts, labelNames)
	return &histogram{
		name: opts.Name,
		hv:   hv,
	}
}

type histogram struct {
	name             string
	hv               *stdprometheus.HistogramVec
	labelNamesValues labelNamesValues
	collector        stdprometheus.Observer
}

func (h *histogram) With(labelValues ...string) metrics.Histogram {
	lnv := h.labelNamesValues.With(labelValues...)
	return &histogram{
		name:             h.name,
		hv:               h.hv,
		labelNamesValues: lnv,
		collector:        h.hv.With(lnv.ToLabels()),
	}
}

func (h *histogram) Observe(value float64) {
	h.collector.Observe(value)
}

func (h *histogram) Describe(ch chan<- *stdprometheus.Desc) {
	h.hv.Describe(ch)
}

// labelNamesValues is a named slice type that provides validation on its With method.
// Metrics may include it as a member to help them satisfy With semantics and
// save some code duplication.
type labelNamesValues []string

// With validates the input, and returns a new aggregate labelNamesValues.
func (lvs labelNamesValues) With(labelValues ...string) labelNamesValues {
	if len(labelValues)%2 != 0 {
		labelValues = append(labelValues, "unknown")
	}

	labels := make([]string, len(lvs)+len(labelValues))
	n := copy(labels, lvs)
	copy(labels[n:], labelValues)
	return labels
}

// ToLabels is a convenience method to convert a labelNamesValues
// to the native prometheus.Labels.
func (lvs labelNamesValues) ToLabels() stdprometheus.Labels {
	labels := make(map[string]string, len(lvs)/2)
	for i := 0; i < len(lvs); i += 2 {
		labels[lvs[i]] = lvs[i+1]
	}
	return labels
}
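
// A short illustrative example (not part of the original file) showing the
// name/value pairing and the odd-length padding behavior:
//
//	lvs := labelNamesValues{}.With("code", "200", "method") // odd count: "unknown" is appended
//	_ = lvs.ToLabels()                                      // prometheus.Labels{"code": "200", "method": "unknown"}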