Add Metrics

Michael 2019-07-18 21:36:05 +02:00 committed by Traefiker Bot
parent 4dc448056c
commit 8e97af8dc3
121 changed files with 8364 additions and 3811 deletions

Gopkg.lock generated
View file

@@ -701,7 +701,7 @@
   version = "v1.41.0"

 [[projects]]
-  digest = "1:9e53c5e9ee65a2c587d6ade11761ef2f976abfcd9599c5016b7046e63c1f7fb2"
+  digest = "1:bed40e7a58536b77890de9fc4911a1322a31cd2495bbcad8446d182063eb1ae4"
   name = "github.com/go-kit/kit"
   packages = [
     "log",
@@ -716,8 +716,8 @@
     "util/conn",
   ]
   pruneopts = "NUT"
-  revision = "ca4112baa34cb55091301bdc13b1420a122b1b9e"
-  version = "v0.7.0"
+  revision = "150a65a7ec6156b4b640c1fd55f26fd3d475d656"
+  version = "v0.9.0"

 [[projects]]
   digest = "1:341a7df38da99fe91ed40e4008c13cc5d02dcc98ed1a094360cb7d5df26d6d26"
@@ -735,14 +735,6 @@
   revision = "d4920dcf5b7689548a6db640278a9b35a5b48ec6"
   version = "v1.9.1"

-[[projects]]
-  digest = "1:8cf58169eb0a8c009ed3a4c36486980d602ab4cc4e478130493d6cd0404f889b"
-  name = "github.com/go-stack/stack"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "54be5f394ed2c3e19dac9134a40a95ba5a017f7b"
-  version = "v1.5.4"
-
 [[projects]]
   digest = "1:6689652ec1f6e30455551da19c707f2bfac75e4df5c7bbe3f0ad7b49b9aa2cfc"
   name = "github.com/gogo/protobuf"
@@ -938,16 +930,16 @@
   version = "0.2.4"

 [[projects]]
-  digest = "1:9813d5a93abcc5690fa5830bf7186c835493516986be7a2b11b46e7b12e13317"
-  name = "github.com/influxdata/influxdb"
+  branch = "master"
+  digest = "1:50708c8fc92aec981df5c446581cf9f90ba9e2a5692118e0ce75d4534aaa14a2"
+  name = "github.com/influxdata/influxdb1-client"
   packages = [
-    "client/v2",
     "models",
     "pkg/escape",
+    "v2",
   ]
   pruneopts = "NUT"
-  revision = "2d474a3089bcfce6b472779be9470a1f0ef3d5e4"
-  version = "v1.3.7"
+  revision = "8ff2fc3824fcb533795f9a2f233275f0bb18d6c5"

 [[projects]]
   digest = "1:78efd72f12ed0244e5fbe82bd0ecdbaf3e21402ee9176525ef1138a2fc0d3b17"
@@ -1362,14 +1354,16 @@
   version = "v1.0.0"

 [[projects]]
-  digest = "1:d05ebef91c056e176dc4dfe905002bd3dd7b1dc8703b53bf6e88761053236a75"
+  digest = "1:097cc61836050f45cbb712ae3bb45d66fba464c16b8fac09907fa3c1f753eff6"
   name = "github.com/prometheus/client_golang"
   packages = [
     "prometheus",
+    "prometheus/internal",
     "prometheus/promhttp",
   ]
   pruneopts = "NUT"
-  revision = "08fd2e12372a66e68e30523c7642e0cbc3e4fbde"
+  revision = "4ab88e80c249ed361d3299e2930427d9ac43ef8d"
+  version = "v1.0.0"

 [[projects]]
   digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b"
@@ -1379,7 +1373,7 @@
   revision = "6f3806018612930941127f2a7c6c453ba2c527d2"

 [[projects]]
-  digest = "1:65f12bb82877d6e049a41b5feec5f79f11e3e0ea5748f677d68f206ac408c403"
+  digest = "1:d03ca24670416dc8fccc78b05d6736ec655416ca7db0a028e8fb92cfdfe3b55e"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -1387,17 +1381,19 @@
     "model",
   ]
   pruneopts = "NUT"
-  revision = "49fee292b27bfff7f354ee0f64e1bc4850462edf"
+  revision = "31bed53e4047fd6c510e43a941f90cb31be0972a"
+  version = "v0.6.0"

 [[projects]]
-  digest = "1:60d19aad385900a8aa4a755524e68965fcb31b444ec30e673812e06c98674f2e"
+  digest = "1:19305fc369377c111c865a7a01e11c675c57c52a932353bbd4ea360bd5b72d99"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
-    "xfs",
+    "internal/fs",
   ]
   pruneopts = "NUT"
-  revision = "a1dba9ce8baed984a2495b658c82687f8157b98f"
+  revision = "3f98efb27840a48a7a2898ec80be07674d19f9c8"
+  version = "v0.0.3"

 [[projects]]
   branch = "containous-fork"
@@ -1857,7 +1853,7 @@
   version = "v1.20.1"

 [[projects]]
-  digest = "1:b49eceff862a3048ec28dad1fce40bcbdc1703119dbad35d7e5f1beb4f9a4527"
+  digest = "1:d732242a429138da899dfecea82b3c65b4157bdf0b5317c229d9c559b6c3450e"
   name = "gopkg.in/DataDog/dd-trace-go.v1"
   packages = [
     "ddtrace",
@@ -1866,10 +1862,12 @@
     "ddtrace/opentracer",
     "ddtrace/tracer",
     "internal/globalconfig",
+    "internal/log",
+    "internal/version",
   ]
   pruneopts = "NUT"
-  revision = "c19e9e56d5b5b71b6507ce1b0ec06d85aa3705a1"
-  version = "v1.14.0"
+  revision = "8d2998bc69008aa4553846ac9a044aa730bd4ce4"
+  version = "v1.15.0"

 [[projects]]
   digest = "1:c970218a20933dd0a2eb2006de922217fa9276f57d25009b2a934eb1c50031cc"
@@ -2263,7 +2261,7 @@
     "github.com/google/go-github/github",
     "github.com/gorilla/websocket",
     "github.com/hashicorp/go-version",
-    "github.com/influxdata/influxdb/client/v2",
+    "github.com/influxdata/influxdb1-client/v2",
     "github.com/instana/go-sensor",
     "github.com/libkermit/compose/check",
     "github.com/libkermit/docker",

View file

@@ -111,7 +111,11 @@ required = [
 [[constraint]]
   name = "github.com/go-kit/kit"
-  version = "0.7.0"
+  version = "v0.9.0"
+
+[[constraint]]
+  name = "github.com/prometheus/client_golang"
+  version = "v1.0.0"

 [[constraint]]
   branch = "master"
@@ -121,10 +125,6 @@ required = [
 #  name = "github.com/hashicorp/consul"
 #  version = "1.0.6"

-[[constraint]]
-  name = "github.com/influxdata/influxdb"
-  version = "1.3.7"
-
 #[[constraint]]
 #  branch = "master"
 #  name = "github.com/jjcollinge/servicefabric"
@@ -273,7 +273,7 @@ required = [
 [[constraint]]
   name = "gopkg.in/DataDog/dd-trace-go.v1"
-  version = "1.13.0"
+  version = "1.15.0"

 [[constraint]]
   name = "github.com/instana/go-sensor"

View file

@@ -0,0 +1,106 @@
# DataDog

To enable the DataDog metrics exporter:

```toml tab="File (TOML)"
[metrics]
  [metrics.dataDog]
```
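
```yaml tab="File (YAML)"
metrics:
  dataDog: {}
```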
```bash tab="CLI"
--metrics
--metrics.datadog
```

#### `address`

_Required, Default="127.0.0.1:8125"_

The address of the datadog-agent to which the exporter sends metrics.

```toml tab="File (TOML)"
[metrics]
  [metrics.dataDog]
    address = "127.0.0.1:8125"
```

```yaml tab="File (YAML)"
metrics:
  dataDog:
    address: 127.0.0.1:8125
```

```bash tab="CLI"
--metrics
--metrics.datadog.address="127.0.0.1:8125"
```

#### `addEntryPointsLabels`

_Optional, Default=true_

Enable metrics on entry points.

```toml tab="File (TOML)"
[metrics]
  [metrics.dataDog]
    addEntryPointsLabels = true
```

```yaml tab="File (YAML)"
metrics:
  dataDog:
    addEntryPointsLabels: true
```

```bash tab="CLI"
--metrics
--metrics.datadog.addEntryPointsLabels=true
```

#### `addServicesLabels`

_Optional, Default=true_

Enable metrics on services.

```toml tab="File (TOML)"
[metrics]
  [metrics.dataDog]
    addServicesLabels = true
```

```yaml tab="File (YAML)"
metrics:
  dataDog:
    addServicesLabels: true
```

```bash tab="CLI"
--metrics
--metrics.datadog.addServicesLabels=true
```

#### `pushInterval`

_Optional, Default=10s_

The interval at which the exporter pushes metrics to the datadog-agent.

```toml tab="File (TOML)"
[metrics]
  [metrics.dataDog]
    pushInterval = "10s"
```

```yaml tab="File (YAML)"
metrics:
  dataDog:
    pushInterval: 10s
```

```bash tab="CLI"
--metrics
--metrics.datadog.pushInterval=10s
```
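
For the metrics above to land anywhere, a DogStatsD listener must be running at the configured address. A minimal sketch of the relevant `datadog.yaml` keys (Datadog Agent 6; not part of this commit, and enabling DogStatsD is an assumption about your setup):

```yaml
# datadog.yaml (Datadog Agent) - sketch, not part of this commit
use_dogstatsd: true
dogstatsd_port: 8125
```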

View file

@@ -0,0 +1,225 @@
# InfluxDB

To enable the InfluxDB metrics exporter:

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
```

```yaml tab="File (YAML)"
metrics:
  influxdb: {}
```

```bash tab="CLI"
--metrics
--metrics.influxdb
```

#### `address`

_Required, Default="localhost:8089"_

The address of the InfluxDB endpoint to which the exporter sends metrics.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    address = "localhost:8089"
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    address: localhost:8089
```

```bash tab="CLI"
--metrics
--metrics.influxdb.address="localhost:8089"
```

#### `protocol`

_Required, Default="udp"_

The protocol used to reach InfluxDB: `udp` or `http`.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    protocol = "udp"
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    protocol: udp
```

```bash tab="CLI"
--metrics
--metrics.influxdb.protocol="udp"
```

#### `database`

_Optional, Default=""_

InfluxDB database used when protocol is http.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    database = ""
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    database: ""
```

```bash tab="CLI"
--metrics
--metrics.influxdb.database=""
```

#### `retentionPolicy`

_Optional, Default=""_

InfluxDB retention policy used when protocol is http.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    retentionPolicy = ""
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    retentionPolicy: ""
```

```bash tab="CLI"
--metrics
--metrics.influxdb.retentionPolicy=""
```

#### `username`

_Optional, Default=""_

InfluxDB username (only with http).

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    username = ""
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    username: ""
```

```bash tab="CLI"
--metrics
--metrics.influxdb.username=""
```

#### `password`

_Optional, Default=""_

InfluxDB password (only with http).

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    password = ""
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    password: ""
```

```bash tab="CLI"
--metrics
--metrics.influxdb.password=""
```

#### `addEntryPointsLabels`

_Optional, Default=true_

Enable metrics on entry points.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    addEntryPointsLabels = true
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    addEntryPointsLabels: true
```

```bash tab="CLI"
--metrics
--metrics.influxdb.addEntryPointsLabels=true
```

#### `addServicesLabels`

_Optional, Default=true_

Enable metrics on services.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    addServicesLabels = true
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    addServicesLabels: true
```

```bash tab="CLI"
--metrics
--metrics.influxdb.addServicesLabels=true
```

#### `pushInterval`

_Optional, Default=10s_

The interval at which the exporter pushes metrics to InfluxDB.

```toml tab="File (TOML)"
[metrics]
  [metrics.influxdb]
    pushInterval = "10s"
```

```yaml tab="File (YAML)"
metrics:
  influxdb:
    pushInterval: 10s
```

```bash tab="CLI"
--metrics
--metrics.influxdb.pushInterval=10s
```
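
With the default `udp` protocol, the target InfluxDB (1.x) instance must have its UDP listener enabled, which is off in a stock install. A sketch of the corresponding `influxdb.conf` section (not part of this commit; the database name is an assumption):

```toml
# influxdb.conf - sketch, not part of this commit
[[udp]]
  enabled = true
  bind-address = ":8089"
  database = "traefik"
```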

View file

@@ -0,0 +1,26 @@
# Metrics
Metrics system
{: .subtitle }
Traefik supports 4 metrics backends:
- [DataDog](./datadog.md)
- [InfluxDB](./influxdb.md)
- [Prometheus](./prometheus.md)
- [StatsD](./statsd.md)
## Configuration
To enable metrics:
```toml tab="File (TOML)"
[metrics]
```
```yaml tab="File (YAML)"
metrics: {}
```
```bash tab="CLI"
--metrics
```
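
Once a backend that serves metrics over HTTP is enabled (Prometheus being the usual choice; see its page below), the output can be checked directly. A quick sanity check, assuming the default `traefik` entry point listens on port 8080:

```bash
# dumps the exposed metrics in Prometheus text format (port is an assumption)
curl http://localhost:8080/metrics
```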

View file

@@ -0,0 +1,139 @@
# Prometheus

To enable the Prometheus metrics exporter:

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
```

```yaml tab="File (YAML)"
metrics:
  prometheus: {}
```

```bash tab="CLI"
--metrics
--metrics.prometheus
```

#### `buckets`

_Optional, Default="0.100000, 0.300000, 1.200000, 5.000000"_

Buckets for latency metrics.

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
    buckets = [0.1, 0.3, 1.2, 5.0]
```

```yaml tab="File (YAML)"
metrics:
  prometheus:
    buckets:
      - 0.1
      - 0.3
      - 1.2
      - 5.0
```

```bash tab="CLI"
--metrics
--metrics.prometheus.buckets=0.100000,0.300000,1.200000,5.000000
```

#### `entryPoint`

_Optional, Default=traefik_

The entry point on which metrics are exposed for Prometheus to scrape.

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
    entryPoint = "traefik"
```

```yaml tab="File (YAML)"
metrics:
  prometheus:
    entryPoint: traefik
```

```bash tab="CLI"
--metrics
--metrics.prometheus.entryPoint=traefik
```

#### `middlewares`

_Optional, Default=""_

Middlewares.

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
    middlewares = ["xxx", "yyy"]
```

```yaml tab="File (YAML)"
metrics:
  prometheus:
    middlewares:
      - xxx
      - yyy
```

```bash tab="CLI"
--metrics
--metrics.prometheus.middlewares="xxx,yyy"
```

#### `addEntryPointsLabels`

_Optional, Default=true_

Enable metrics on entry points.

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
    addEntryPointsLabels = true
```

```yaml tab="File (YAML)"
metrics:
  prometheus:
    addEntryPointsLabels: true
```

```bash tab="CLI"
--metrics
--metrics.prometheus.addEntryPointsLabels=true
```

#### `addServicesLabels`

_Optional, Default=true_

Enable metrics on services.

```toml tab="File (TOML)"
[metrics]
  [metrics.prometheus]
    addServicesLabels = true
```

```yaml tab="File (YAML)"
metrics:
  prometheus:
    addServicesLabels: true
```

```bash tab="CLI"
--metrics
--metrics.prometheus.addServicesLabels=true
```
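
Traefik only exposes the metrics; Prometheus still has to be told to scrape them. A minimal scrape-job sketch for `prometheus.yml` (not part of this commit; the job name, target host, and port are assumptions):

```yaml
# prometheus.yml - sketch, not part of this commit
scrape_configs:
  - job_name: "traefik"
    static_configs:
      - targets: ["traefik.example.com:8080"]
```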

View file

@@ -0,0 +1,110 @@
# StatsD

To enable the StatsD metrics exporter:

```toml tab="File (TOML)"
[metrics]
  [metrics.statsd]
```

```yaml tab="File (YAML)"
metrics:
  statsd: {}
```

```bash tab="CLI"
--metrics
--metrics.statsd
```

#### `address`

_Required, Default="localhost:8125"_

The address of the StatsD server to which the exporter sends metrics.

```toml tab="File (TOML)"
[metrics]
  [metrics.statsd]
    address = "localhost:8125"
```

```yaml tab="File (YAML)"
metrics:
  statsd:
    address: localhost:8125
```

```bash tab="CLI"
--metrics
--metrics.statsd.address="localhost:8125"
```

#### `addEntryPointsLabels`

_Optional, Default=true_

Enable metrics on entry points.

```toml tab="File (TOML)"
[metrics]
  [metrics.statsd]
    addEntryPointsLabels = true
```

```yaml tab="File (YAML)"
metrics:
  statsd:
    addEntryPointsLabels: true
```

```bash tab="CLI"
--metrics
--metrics.statsd.addEntryPointsLabels=true
```

#### `addServicesLabels`

_Optional, Default=true_

Enable metrics on services.

```toml tab="File (TOML)"
[metrics]
  [metrics.statsd]
    addServicesLabels = true
```

```yaml tab="File (YAML)"
metrics:
  statsd:
    addServicesLabels: true
```

```bash tab="CLI"
--metrics
--metrics.statsd.addServicesLabels=true
```

#### `pushInterval`

_Optional, Default=10s_

The interval at which the exporter pushes metrics to StatsD.

```toml tab="File (TOML)"
[metrics]
  [metrics.statsd]
    pushInterval = "10s"
```

```yaml tab="File (YAML)"
metrics:
  statsd:
    pushInterval: 10s
```

```bash tab="CLI"
--metrics
--metrics.statsd.pushInterval=10s
```
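
StatsD lines are plain text over UDP, so they are easy to eyeball without a full StatsD server. A throwaway listener, assuming the default port:

```bash
# Prints raw StatsD datagrams as Traefik emits them
# (BSD netcat syntax; traditional netcat wants `nc -u -l -p 8125`).
nc -u -l 8125
```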

View file

@@ -180,18 +180,30 @@ Log level set to traefik logs. (Default: ```ERROR```)
 `--metrics.datadog`:
 DataDog metrics exporter type. (Default: ```false```)
 
+`--metrics.datadog.addentrypointslabels`:
+Enable metrics on entry points. (Default: ```true```)
+
 `--metrics.datadog.address`:
 DataDog's address. (Default: ```localhost:8125```)
 
+`--metrics.datadog.addserviceslabels`:
+Enable metrics on services. (Default: ```true```)
+
 `--metrics.datadog.pushinterval`:
 DataDog push interval. (Default: ```10```)
 
 `--metrics.influxdb`:
 InfluxDB metrics exporter type. (Default: ```false```)
 
+`--metrics.influxdb.addentrypointslabels`:
+Enable metrics on entry points. (Default: ```true```)
+
 `--metrics.influxdb.address`:
 InfluxDB address. (Default: ```localhost:8089```)
 
+`--metrics.influxdb.addserviceslabels`:
+Enable metrics on services. (Default: ```true```)
+
 `--metrics.influxdb.database`:
 InfluxDB database used when protocol is http.
 
@@ -213,6 +225,12 @@ InfluxDB username (only with http).
 `--metrics.prometheus`:
 Prometheus metrics exporter type. (Default: ```false```)
 
+`--metrics.prometheus.addentrypointslabels`:
+Enable metrics on entry points. (Default: ```true```)
+
+`--metrics.prometheus.addserviceslabels`:
+Enable metrics on services. (Default: ```true```)
+
 `--metrics.prometheus.buckets`:
 Buckets for latency metrics. (Default: ```0.100000, 0.300000, 1.200000, 5.000000```)
 
@@ -225,9 +243,15 @@ Middlewares.
 `--metrics.statsd`:
 StatsD metrics exporter type. (Default: ```false```)
 
+`--metrics.statsd.addentrypointslabels`:
+Enable metrics on entry points. (Default: ```true```)
+
 `--metrics.statsd.address`:
 StatsD address. (Default: ```localhost:8125```)
 
+`--metrics.statsd.addserviceslabels`:
+Enable metrics on services. (Default: ```true```)
+
 `--metrics.statsd.pushinterval`:
 StatsD push interval. (Default: ```10```)

View file

@@ -180,18 +180,30 @@ Log level set to traefik logs. (Default: ```ERROR```)
 `TRAEFIK_METRICS_DATADOG`:
 DataDog metrics exporter type. (Default: ```false```)
 
+`TRAEFIK_METRICS_DATADOG_ADDENTRYPOINTSLABELS`:
+Enable metrics on entry points. (Default: ```true```)
+
 `TRAEFIK_METRICS_DATADOG_ADDRESS`:
 DataDog's address. (Default: ```localhost:8125```)
 
+`TRAEFIK_METRICS_DATADOG_ADDSERVICESLABELS`:
+Enable metrics on services. (Default: ```true```)
+
 `TRAEFIK_METRICS_DATADOG_PUSHINTERVAL`:
 DataDog push interval. (Default: ```10```)
 
 `TRAEFIK_METRICS_INFLUXDB`:
 InfluxDB metrics exporter type. (Default: ```false```)
 
+`TRAEFIK_METRICS_INFLUXDB_ADDENTRYPOINTSLABELS`:
+Enable metrics on entry points. (Default: ```true```)
+
 `TRAEFIK_METRICS_INFLUXDB_ADDRESS`:
 InfluxDB address. (Default: ```localhost:8089```)
 
+`TRAEFIK_METRICS_INFLUXDB_ADDSERVICESLABELS`:
+Enable metrics on services. (Default: ```true```)
+
 `TRAEFIK_METRICS_INFLUXDB_DATABASE`:
 InfluxDB database used when protocol is http.
 
@@ -213,6 +225,12 @@ InfluxDB username (only with http).
 `TRAEFIK_METRICS_PROMETHEUS`:
 Prometheus metrics exporter type. (Default: ```false```)
 
+`TRAEFIK_METRICS_PROMETHEUS_ADDENTRYPOINTSLABELS`:
+Enable metrics on entry points. (Default: ```true```)
+
+`TRAEFIK_METRICS_PROMETHEUS_ADDSERVICESLABELS`:
+Enable metrics on services. (Default: ```true```)
+
 `TRAEFIK_METRICS_PROMETHEUS_BUCKETS`:
 Buckets for latency metrics. (Default: ```0.100000, 0.300000, 1.200000, 5.000000```)
 
@@ -225,9 +243,15 @@ Middlewares.
 `TRAEFIK_METRICS_STATSD`:
 StatsD metrics exporter type. (Default: ```false```)
 
+`TRAEFIK_METRICS_STATSD_ADDENTRYPOINTSLABELS`:
+Enable metrics on entry points. (Default: ```true```)
+
 `TRAEFIK_METRICS_STATSD_ADDRESS`:
 StatsD address. (Default: ```localhost:8125```)
 
+`TRAEFIK_METRICS_STATSD_ADDSERVICESLABELS`:
+Enable metrics on services. (Default: ```true```)
+
 `TRAEFIK_METRICS_STATSD_PUSHINTERVAL`:
 StatsD push interval. (Default: ```10```)

View file

@@ -120,12 +120,18 @@
     buckets = [42.0, 42.0]
     entryPoint = "foobar"
     middlewares = ["foobar", "foobar"]
+    addEntryPointsLabels = true
+    addServicesLabels = true
   [metrics.dataDog]
     address = "foobar"
     pushInterval = "10s"
+    addEntryPointsLabels = true
+    addServicesLabels = true
   [metrics.statsD]
     address = "foobar"
     pushInterval = "10s"
+    addEntryPointsLabels = true
+    addServicesLabels = true
   [metrics.influxDB]
     address = "foobar"
     protocol = "foobar"
@@ -134,6 +140,8 @@
     retentionPolicy = "foobar"
     username = "foobar"
     password = "foobar"
+    addEntryPointsLabels = true
+    addServicesLabels = true

 [ping]
   entryPoint = "foobar"

View file

@@ -131,12 +131,18 @@ metrics:
     middlewares:
     - foobar
     - foobar
+    addEntryPointsLabels: true
+    addServicesLabels: true
   dataDog:
     address: foobar
     pushInterval: 42
+    addEntryPointsLabels: true
+    addServicesLabels: true
   statsD:
     address: foobar
     pushInterval: 42
+    addEntryPointsLabels: true
+    addServicesLabels: true
   influxDB:
     address: foobar
     protocol: foobar
@@ -145,6 +151,8 @@ metrics:
     retentionPolicy: foobar
     username: foobar
     password: foobar
+    addEntryPointsLabels: true
+    addServicesLabels: true
 ping:
   entryPoint: foobar
   middlewares:

View file

@@ -119,6 +119,12 @@ nav:
   - 'Observability':
       - 'Logs': 'observability/logs.md'
      - 'Access Logs': 'observability/access-logs.md'
+      - 'Metrics':
+          - 'Overview': 'observability/metrics/overview.md'
+          - 'DataDog': 'observability/metrics/datadog.md'
+          - 'InfluxDB': 'observability/metrics/influxdb.md'
+          - 'Prometheus': 'observability/metrics/prometheus.md'
+          - 'StatsD': 'observability/metrics/statsd.md'
   - 'Tracing':
       - 'Overview': 'observability/tracing/overview.md'
      - 'Jaeger': 'observability/tracing/jaeger.md'

View file

@@ -20,18 +20,18 @@ var datadogTicker *time.Ticker
 // Metric names consistent with https://github.com/DataDog/integrations-extras/pull/64
 const (
-	ddMetricsBackendReqsName    = "backend.request.total"
-	ddMetricsBackendLatencyName = "backend.request.duration"
-	ddRetriesTotalName          = "backend.retries.total"
+	ddMetricsServiceReqsName    = "service.request.total"
+	ddMetricsServiceLatencyName = "service.request.duration"
+	ddRetriesTotalName          = "service.retries.total"
 	ddConfigReloadsName           = "config.reload.total"
 	ddConfigReloadsFailureTagName = "failure"
 	ddLastConfigReloadSuccessName = "config.reload.lastSuccessTimestamp"
 	ddLastConfigReloadFailureName = "config.reload.lastFailureTimestamp"
-	ddEntrypointReqsName        = "entrypoint.request.total"
-	ddEntrypointReqDurationName = "entrypoint.request.duration"
-	ddEntrypointOpenConnsName   = "entrypoint.connections.open"
-	ddOpenConnsName             = "backend.connections.open"
-	ddServerUpName              = "backend.server.up"
+	ddEntryPointReqsName        = "entrypoint.request.total"
+	ddEntryPointReqDurationName = "entrypoint.request.duration"
+	ddEntryPointOpenConnsName   = "entrypoint.connections.open"
+	ddOpenConnsName             = "service.connections.open"
+	ddServerUpName              = "service.server.up"
 )

 // RegisterDatadog registers the metrics pusher if this didn't happen yet and creates a datadog Registry instance.
@@ -41,19 +41,26 @@ func RegisterDatadog(ctx context.Context, config *types.DataDog) Registry {
 	}

 	registry := &standardRegistry{
-		enabled:                        true,
 		configReloadsCounter:           datadogClient.NewCounter(ddConfigReloadsName, 1.0),
 		configReloadsFailureCounter:    datadogClient.NewCounter(ddConfigReloadsName, 1.0).With(ddConfigReloadsFailureTagName, "true"),
 		lastConfigReloadSuccessGauge:   datadogClient.NewGauge(ddLastConfigReloadSuccessName),
 		lastConfigReloadFailureGauge:   datadogClient.NewGauge(ddLastConfigReloadFailureName),
-		entrypointReqsCounter:          datadogClient.NewCounter(ddEntrypointReqsName, 1.0),
-		entrypointReqDurationHistogram: datadogClient.NewHistogram(ddEntrypointReqDurationName, 1.0),
-		entrypointOpenConnsGauge:       datadogClient.NewGauge(ddEntrypointOpenConnsName),
-		backendReqsCounter:             datadogClient.NewCounter(ddMetricsBackendReqsName, 1.0),
-		backendReqDurationHistogram:    datadogClient.NewHistogram(ddMetricsBackendLatencyName, 1.0),
-		backendRetriesCounter:          datadogClient.NewCounter(ddRetriesTotalName, 1.0),
-		backendOpenConnsGauge:          datadogClient.NewGauge(ddOpenConnsName),
-		backendServerUpGauge:           datadogClient.NewGauge(ddServerUpName),
 	}
+
+	if config.AddEntryPointsLabels {
+		registry.epEnabled = config.AddEntryPointsLabels
+		registry.entryPointReqsCounter = datadogClient.NewCounter(ddEntryPointReqsName, 1.0)
+		registry.entryPointReqDurationHistogram = datadogClient.NewHistogram(ddEntryPointReqDurationName, 1.0)
+		registry.entryPointOpenConnsGauge = datadogClient.NewGauge(ddEntryPointOpenConnsName)
+	}
+
+	if config.AddServicesLabels {
+		registry.svcEnabled = config.AddServicesLabels
+		registry.serviceReqsCounter = datadogClient.NewCounter(ddMetricsServiceReqsName, 1.0)
+		registry.serviceReqDurationHistogram = datadogClient.NewHistogram(ddMetricsServiceLatencyName, 1.0)
+		registry.serviceRetriesCounter = datadogClient.NewCounter(ddRetriesTotalName, 1.0)
+		registry.serviceOpenConnsGauge = datadogClient.NewGauge(ddOpenConnsName)
+		registry.serviceServerUpGauge = datadogClient.NewGauge(ddServerUpName)
+	}

 	return registry
@@ -68,7 +75,7 @@ func initDatadogClient(ctx context.Context, config *types.DataDog) *time.Ticker
 	report := time.NewTicker(time.Duration(config.PushInterval))

 	safe.Go(func() {
-		datadogClient.SendLoop(report.C, "udp", address)
+		datadogClient.SendLoop(ctx, report.C, "udp", address)
 	})

 	return report
View file

@@ -16,38 +16,38 @@ func TestDatadog(t *testing.T) {
 	// This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond
 	udp.Timeout = 5 * time.Second

-	datadogRegistry := RegisterDatadog(context.Background(), &types.DataDog{Address: ":18125", PushInterval: types.Duration(time.Second)})
+	datadogRegistry := RegisterDatadog(context.Background(), &types.DataDog{Address: ":18125", PushInterval: types.Duration(time.Second), AddEntryPointsLabels: true, AddServicesLabels: true})
 	defer StopDatadog()

-	if !datadogRegistry.IsEnabled() {
+	if !datadogRegistry.IsEpEnabled() || !datadogRegistry.IsSvcEnabled() {
 		t.Errorf("DatadogRegistry should return true for IsEnabled()")
 	}

 	expected := []string{
 		// We are only validating counts, as it is nearly impossible to validate latency, since it varies every run
-		"traefik.backend.request.total:1.000000|c|#service:test,code:404,method:GET\n",
-		"traefik.backend.request.total:1.000000|c|#service:test,code:200,method:GET\n",
-		"traefik.backend.retries.total:2.000000|c|#service:test\n",
-		"traefik.backend.request.duration:10000.000000|h|#service:test,code:200\n",
+		"traefik.service.request.total:1.000000|c|#service:test,code:404,method:GET\n",
+		"traefik.service.request.total:1.000000|c|#service:test,code:200,method:GET\n",
+		"traefik.service.retries.total:2.000000|c|#service:test\n",
+		"traefik.service.request.duration:10000.000000|h|#service:test,code:200\n",
 		"traefik.config.reload.total:1.000000|c\n",
 		"traefik.config.reload.total:1.000000|c|#failure:true\n",
 		"traefik.entrypoint.request.total:1.000000|c|#entrypoint:test\n",
 		"traefik.entrypoint.request.duration:10000.000000|h|#entrypoint:test\n",
 		"traefik.entrypoint.connections.open:1.000000|g|#entrypoint:test\n",
-		"traefik.backend.server.up:1.000000|g|#backend:test,url:http://127.0.0.1,one:two\n",
+		"traefik.service.server.up:1.000000|g|#service:test,url:http://127.0.0.1,one:two\n",
 	}

 	udp.ShouldReceiveAll(t, expected, func() {
-		datadogRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
-		datadogRegistry.BackendReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
-		datadogRegistry.BackendReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
-		datadogRegistry.BackendRetriesCounter().With("service", "test").Add(1)
-		datadogRegistry.BackendRetriesCounter().With("service", "test").Add(1)
+		datadogRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
+		datadogRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
+		datadogRegistry.ServiceReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
+		datadogRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+		datadogRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
 		datadogRegistry.ConfigReloadsCounter().Add(1)
 		datadogRegistry.ConfigReloadsFailureCounter().Add(1)
-		datadogRegistry.EntrypointReqsCounter().With("entrypoint", "test").Add(1)
-		datadogRegistry.EntrypointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
-		datadogRegistry.EntrypointOpenConnsGauge().With("entrypoint", "test").Set(1)
-		datadogRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1", "one", "two").Set(1)
+		datadogRegistry.EntryPointReqsCounter().With("entrypoint", "test").Add(1)
+		datadogRegistry.EntryPointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
+		datadogRegistry.EntryPointOpenConnsGauge().With("entrypoint", "test").Set(1)
+		datadogRegistry.ServiceServerUpGauge().With("service", "test", "url", "http://127.0.0.1", "one", "two").Set(1)
 	})
 }

View file

@@ -13,7 +13,7 @@ import (
 	"github.com/containous/traefik/pkg/types"
 	kitlog "github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/metrics/influx"
-	influxdb "github.com/influxdata/influxdb/client/v2"
+	influxdb "github.com/influxdata/influxdb1-client/v2"
 )

 var influxDBClient *influx.Influx
@@ -26,18 +26,18 @@ type influxDBWriter struct {
 var influxDBTicker *time.Ticker

 const (
-	influxDBMetricsBackendReqsName    = "traefik.backend.requests.total"
-	influxDBMetricsBackendLatencyName = "traefik.backend.request.duration"
-	influxDBRetriesTotalName          = "traefik.backend.retries.total"
+	influxDBMetricsServiceReqsName    = "traefik.service.requests.total"
+	influxDBMetricsServiceLatencyName = "traefik.service.request.duration"
+	influxDBRetriesTotalName          = "traefik.service.retries.total"
 	influxDBConfigReloadsName           = "traefik.config.reload.total"
 	influxDBConfigReloadsFailureName    = influxDBConfigReloadsName + ".failure"
 	influxDBLastConfigReloadSuccessName = "traefik.config.reload.lastSuccessTimestamp"
 	influxDBLastConfigReloadFailureName = "traefik.config.reload.lastFailureTimestamp"
-	influxDBEntrypointReqsName        = "traefik.entrypoint.requests.total"
-	influxDBEntrypointReqDurationName = "traefik.entrypoint.request.duration"
-	influxDBEntrypointOpenConnsName   = "traefik.entrypoint.connections.open"
-	influxDBOpenConnsName             = "traefik.backend.connections.open"
-	influxDBServerUpName              = "traefik.backend.server.up"
+	influxDBEntryPointReqsName        = "traefik.entrypoint.requests.total"
+	influxDBEntryPointReqDurationName = "traefik.entrypoint.request.duration"
+	influxDBEntryPointOpenConnsName   = "traefik.entrypoint.connections.open"
+	influxDBOpenConnsName             = "traefik.service.connections.open"
+	influxDBServerUpName              = "traefik.service.server.up"
 )

 const (
@@ -51,24 +51,33 @@ func RegisterInfluxDB(ctx context.Context, config *types.InfluxDB) Registry {
 		influxDBClient = initInfluxDBClient(ctx, config)
 	}
 	if influxDBTicker == nil {
-		influxDBTicker = initInfluxDBTicker(config)
+		influxDBTicker = initInfluxDBTicker(ctx, config)
 	}

-	return &standardRegistry{
-		enabled:                        true,
+	registry := &standardRegistry{
 		configReloadsCounter:         influxDBClient.NewCounter(influxDBConfigReloadsName),
 		configReloadsFailureCounter:  influxDBClient.NewCounter(influxDBConfigReloadsFailureName),
 		lastConfigReloadSuccessGauge: influxDBClient.NewGauge(influxDBLastConfigReloadSuccessName),
 		lastConfigReloadFailureGauge: influxDBClient.NewGauge(influxDBLastConfigReloadFailureName),
-		entrypointReqsCounter:          influxDBClient.NewCounter(influxDBEntrypointReqsName),
-		entrypointReqDurationHistogram: influxDBClient.NewHistogram(influxDBEntrypointReqDurationName),
-		entrypointOpenConnsGauge:       influxDBClient.NewGauge(influxDBEntrypointOpenConnsName),
-		backendReqsCounter:             influxDBClient.NewCounter(influxDBMetricsBackendReqsName),
-		backendReqDurationHistogram:    influxDBClient.NewHistogram(influxDBMetricsBackendLatencyName),
-		backendRetriesCounter:          influxDBClient.NewCounter(influxDBRetriesTotalName),
-		backendOpenConnsGauge:          influxDBClient.NewGauge(influxDBOpenConnsName),
-		backendServerUpGauge:           influxDBClient.NewGauge(influxDBServerUpName),
 	}
+
+	if config.AddEntryPointsLabels {
+		registry.epEnabled = config.AddEntryPointsLabels
+		registry.entryPointReqsCounter = influxDBClient.NewCounter(influxDBEntryPointReqsName)
+		registry.entryPointReqDurationHistogram = influxDBClient.NewHistogram(influxDBEntryPointReqDurationName)
+		registry.entryPointOpenConnsGauge = influxDBClient.NewGauge(influxDBEntryPointOpenConnsName)
+	}
+
+	if config.AddServicesLabels {
+		registry.svcEnabled = config.AddServicesLabels
+		registry.serviceReqsCounter = influxDBClient.NewCounter(influxDBMetricsServiceReqsName)
+		registry.serviceReqDurationHistogram = influxDBClient.NewHistogram(influxDBMetricsServiceLatencyName)
+		registry.serviceRetriesCounter = influxDBClient.NewCounter(influxDBRetriesTotalName)
+		registry.serviceOpenConnsGauge = influxDBClient.NewGauge(influxDBOpenConnsName)
+		registry.serviceServerUpGauge = influxDBClient.NewGauge(influxDBServerUpName)
+	}
+
+	return registry
 }

 // initInfluxDBTicker creates a influxDBClient
@@ -115,12 +124,12 @@ func initInfluxDBClient(ctx context.Context, config *types.InfluxDB) *influx.Inf
 }

 // initInfluxDBTicker initializes metrics pusher
-func initInfluxDBTicker(config *types.InfluxDB) *time.Ticker {
+func initInfluxDBTicker(ctx context.Context, config *types.InfluxDB) *time.Ticker {
 	report := time.NewTicker(time.Duration(config.PushInterval))

 	safe.Go(func() {
 		var buf bytes.Buffer
-		influxDBClient.WriteLoop(report.C, &influxDBWriter{buf: buf, config: config})
+		influxDBClient.WriteLoop(ctx, report.C, &influxDBWriter{buf: buf, config: config})
 	})

 	return report

View file

@@ -20,35 +20,35 @@ func TestInfluxDB(t *testing.T) {
 	// This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond
 	udp.Timeout = 5 * time.Second

-	influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ":8089", PushInterval: types.Duration(time.Second)})
+	influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ":8089", PushInterval: types.Duration(time.Second), AddEntryPointsLabels: true, AddServicesLabels: true})
 	defer StopInfluxDB()

-	if !influxDBRegistry.IsEnabled() {
-		t.Fatalf("InfluxDB registry must be enabled")
+	if !influxDBRegistry.IsEpEnabled() || !influxDBRegistry.IsSvcEnabled() {
+		t.Fatalf("InfluxDB registry must be epEnabled")
 	}

-	expectedBackend := []string{
-		`(traefik\.backend\.requests\.total,backend=test,code=200,method=GET count=1) [\d]{19}`,
-		`(traefik\.backend\.requests\.total,backend=test,code=404,method=GET count=1) [\d]{19}`,
-		`(traefik\.backend\.request\.duration,backend=test,code=200 p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
-		`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,backend=test count=2) [\d]{19}`,
+	expectedService := []string{
+		`(traefik\.service\.requests\.total,code=200,method=GET,service=test count=1) [\d]{19}`,
+		`(traefik\.service\.requests\.total,code=404,method=GET,service=test count=1) [\d]{19}`,
+		`(traefik\.service\.request\.duration,code=200,service=test p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
+		`(traefik\.service\.retries\.total(?:,code=[\d]{3},method=GET)?,service=test count=2) [\d]{19}`,
 		`(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
 		`(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
-		`(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
+		`(traefik\.service\.server\.up,service=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
 	}

-	msgBackend := udp.ReceiveString(t, func() {
-		influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
-		influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
-		influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
-		influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
-		influxDBRegistry.BackendReqDurationHistogram().With("backend", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
+	msgService := udp.ReceiveString(t, func() {
+		influxDBRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
+		influxDBRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
+		influxDBRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+		influxDBRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+		influxDBRegistry.ServiceReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
 		influxDBRegistry.ConfigReloadsCounter().Add(1)
 		influxDBRegistry.ConfigReloadsFailureCounter().Add(1)
-		influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1)
+		influxDBRegistry.ServiceServerUpGauge().With("service", "test", "url", "http://127.0.0.1").Set(1)
 	})

-	assertMessage(t, msgBackend, expectedBackend)
+	assertMessage(t, msgService, expectedService)

 	expectedEntrypoint := []string{
 		`(traefik\.entrypoint\.requests\.total,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
@@ -57,9 +57,9 @@ func TestInfluxDB(t *testing.T) {
 	}

 	msgEntrypoint := udp.ReceiveString(t, func() {
-		influxDBRegistry.EntrypointReqsCounter().With("entrypoint", "test").Add(1)
-		influxDBRegistry.EntrypointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
-		influxDBRegistry.EntrypointOpenConnsGauge().With("entrypoint", "test").Set(1)
+		influxDBRegistry.EntryPointReqsCounter().With("entrypoint", "test").Add(1)
+		influxDBRegistry.EntryPointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
+		influxDBRegistry.EntryPointOpenConnsGauge().With("entrypoint", "test").Set(1)
 	})
@@ -76,38 +76,38 @@ func TestInfluxDBHTTP(t *testing.T) {
 		}
 		bodyStr := string(body)
 		c <- &bodyStr
-		fmt.Fprintln(w, "ok")
+		_, _ = fmt.Fprintln(w, "ok")
 	}))
 	defer ts.Close()

-	influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ts.URL, Protocol: "http", PushInterval: types.Duration(time.Second), Database: "test", RetentionPolicy: "autogen"})
+	influxDBRegistry := RegisterInfluxDB(context.Background(), &types.InfluxDB{Address: ts.URL, Protocol: "http", PushInterval: types.Duration(time.Second), Database: "test", RetentionPolicy: "autogen", AddEntryPointsLabels: true, AddServicesLabels: true})
 	defer StopInfluxDB()

-	if !influxDBRegistry.IsEnabled() {
-		t.Fatalf("InfluxDB registry must be enabled")
+	if !influxDBRegistry.IsEpEnabled() || !influxDBRegistry.IsSvcEnabled() {
+		t.Fatalf("InfluxDB registry must be epEnabled")
 	}

-	expectedBackend := []string{
-		`(traefik\.backend\.requests\.total,backend=test,code=200,method=GET count=1) [\d]{19}`,
-		`(traefik\.backend\.requests\.total,backend=test,code=404,method=GET count=1) [\d]{19}`,
-		`(traefik\.backend\.request\.duration,backend=test,code=200 p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
-		`(traefik\.backend\.retries\.total(?:,code=[\d]{3},method=GET)?,backend=test count=2) [\d]{19}`,
+	expectedService := []string{
+		`(traefik\.service\.requests\.total,code=200,method=GET,service=test count=1) [\d]{19}`,
+		`(traefik\.service\.requests\.total,code=404,method=GET,service=test count=1) [\d]{19}`,
+		`(traefik\.service\.request\.duration,code=200,service=test p50=10000,p90=10000,p95=10000,p99=10000) [\d]{19}`,
+		`(traefik\.service\.retries\.total(?:,code=[\d]{3},method=GET)?,service=test count=2) [\d]{19}`,
 		`(traefik\.config\.reload\.total(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
 		`(traefik\.config\.reload\.total\.failure(?:[a-z=0-9A-Z,]+)? count=1) [\d]{19}`,
-		`(traefik\.backend\.server\.up,backend=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
+		`(traefik\.service\.server\.up,service=test(?:[a-z=0-9A-Z,]+)?,url=http://127.0.0.1 value=1) [\d]{19}`,
 	}

-	influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
-	influxDBRegistry.BackendReqsCounter().With("backend", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
-	influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
-	influxDBRegistry.BackendRetriesCounter().With("backend", "test").Add(1)
-	influxDBRegistry.BackendReqDurationHistogram().With("backend", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
+	influxDBRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
+	influxDBRegistry.ServiceReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
+	influxDBRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+	influxDBRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+	influxDBRegistry.ServiceReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
 	influxDBRegistry.ConfigReloadsCounter().Add(1)
 	influxDBRegistry.ConfigReloadsFailureCounter().Add(1)
-	influxDBRegistry.BackendServerUpGauge().With("backend", "test", "url", "http://127.0.0.1").Set(1)
+	influxDBRegistry.ServiceServerUpGauge().With("service", "test", "url", "http://127.0.0.1").Set(1)

-	msgBackend := <-c
-	assertMessage(t, *msgBackend, expectedBackend)
+	msgService := <-c
+	assertMessage(t, *msgService, expectedService)

 	expectedEntrypoint := []string{
 		`(traefik\.entrypoint\.requests\.total,entrypoint=test(?:[a-z=0-9A-Z,:/.]+)? count=1) [\d]{19}`,
@@ -115,9 +115,9 @@ func TestInfluxDBHTTP(t *testing.T) {
 		`(traefik\.entrypoint\.connections\.open,entrypoint=test value=1) [\d]{19}`,
 	}

-	influxDBRegistry.EntrypointReqsCounter().With("entrypoint", "test").Add(1)
-	influxDBRegistry.EntrypointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
-	influxDBRegistry.EntrypointOpenConnsGauge().With("entrypoint", "test").Set(1)
+	influxDBRegistry.EntryPointReqsCounter().With("entrypoint", "test").Add(1)
+	influxDBRegistry.EntryPointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
+	influxDBRegistry.EntryPointOpenConnsGauge().With("entrypoint", "test").Set(1)

 	msgEntrypoint := <-c
 	assertMessage(t, *msgEntrypoint, expectedEntrypoint)

View file

@ -7,8 +7,10 @@ import (
// Registry has to implemented by any system that wants to monitor and expose metrics. // Registry has to implemented by any system that wants to monitor and expose metrics.
type Registry interface { type Registry interface {
// IsEnabled shows whether metrics instrumentation is enabled. // IsEpEnabled shows whether metrics instrumentation is enabled on entry points.
IsEnabled() bool IsEpEnabled() bool
// IsSvcEnabled shows whether metrics instrumentation is enabled on services.
IsSvcEnabled() bool
// server metrics // server metrics
ConfigReloadsCounter() metrics.Counter ConfigReloadsCounter() metrics.Counter
@ -17,16 +19,16 @@ type Registry interface {
LastConfigReloadFailureGauge() metrics.Gauge LastConfigReloadFailureGauge() metrics.Gauge
// entry point metrics // entry point metrics
EntrypointReqsCounter() metrics.Counter EntryPointReqsCounter() metrics.Counter
EntrypointReqDurationHistogram() metrics.Histogram EntryPointReqDurationHistogram() metrics.Histogram
EntrypointOpenConnsGauge() metrics.Gauge EntryPointOpenConnsGauge() metrics.Gauge
// backend metrics // service metrics
BackendReqsCounter() metrics.Counter ServiceReqsCounter() metrics.Counter
BackendReqDurationHistogram() metrics.Histogram ServiceReqDurationHistogram() metrics.Histogram
BackendOpenConnsGauge() metrics.Gauge ServiceOpenConnsGauge() metrics.Gauge
BackendRetriesCounter() metrics.Counter ServiceRetriesCounter() metrics.Counter
BackendServerUpGauge() metrics.Gauge ServiceServerUpGauge() metrics.Gauge
} }
// NewVoidRegistry is a noop implementation of metrics.Registry. // NewVoidRegistry is a noop implementation of metrics.Registry.
@ -43,14 +45,14 @@ func NewMultiRegistry(registries []Registry) Registry {
var configReloadsFailureCounter []metrics.Counter var configReloadsFailureCounter []metrics.Counter
var lastConfigReloadSuccessGauge []metrics.Gauge var lastConfigReloadSuccessGauge []metrics.Gauge
var lastConfigReloadFailureGauge []metrics.Gauge var lastConfigReloadFailureGauge []metrics.Gauge
var entrypointReqsCounter []metrics.Counter var entryPointReqsCounter []metrics.Counter
var entrypointReqDurationHistogram []metrics.Histogram var entryPointReqDurationHistogram []metrics.Histogram
var entrypointOpenConnsGauge []metrics.Gauge var entryPointOpenConnsGauge []metrics.Gauge
var backendReqsCounter []metrics.Counter var serviceReqsCounter []metrics.Counter
var backendReqDurationHistogram []metrics.Histogram var serviceReqDurationHistogram []metrics.Histogram
var backendOpenConnsGauge []metrics.Gauge var serviceOpenConnsGauge []metrics.Gauge
var backendRetriesCounter []metrics.Counter var serviceRetriesCounter []metrics.Counter
var backendServerUpGauge []metrics.Gauge var serviceServerUpGauge []metrics.Gauge
for _, r := range registries { for _, r := range registries {
if r.ConfigReloadsCounter() != nil { if r.ConfigReloadsCounter() != nil {
@ -65,67 +67,73 @@ func NewMultiRegistry(registries []Registry) Registry {
if r.LastConfigReloadFailureGauge() != nil { if r.LastConfigReloadFailureGauge() != nil {
lastConfigReloadFailureGauge = append(lastConfigReloadFailureGauge, r.LastConfigReloadFailureGauge()) lastConfigReloadFailureGauge = append(lastConfigReloadFailureGauge, r.LastConfigReloadFailureGauge())
} }
if r.EntrypointReqsCounter() != nil { if r.EntryPointReqsCounter() != nil {
entrypointReqsCounter = append(entrypointReqsCounter, r.EntrypointReqsCounter()) entryPointReqsCounter = append(entryPointReqsCounter, r.EntryPointReqsCounter())
} }
if r.EntrypointReqDurationHistogram() != nil { if r.EntryPointReqDurationHistogram() != nil {
entrypointReqDurationHistogram = append(entrypointReqDurationHistogram, r.EntrypointReqDurationHistogram()) entryPointReqDurationHistogram = append(entryPointReqDurationHistogram, r.EntryPointReqDurationHistogram())
} }
if r.EntrypointOpenConnsGauge() != nil { if r.EntryPointOpenConnsGauge() != nil {
entrypointOpenConnsGauge = append(entrypointOpenConnsGauge, r.EntrypointOpenConnsGauge()) entryPointOpenConnsGauge = append(entryPointOpenConnsGauge, r.EntryPointOpenConnsGauge())
} }
if r.BackendReqsCounter() != nil { if r.ServiceReqsCounter() != nil {
backendReqsCounter = append(backendReqsCounter, r.BackendReqsCounter()) serviceReqsCounter = append(serviceReqsCounter, r.ServiceReqsCounter())
} }
if r.BackendReqDurationHistogram() != nil { if r.ServiceReqDurationHistogram() != nil {
backendReqDurationHistogram = append(backendReqDurationHistogram, r.BackendReqDurationHistogram()) serviceReqDurationHistogram = append(serviceReqDurationHistogram, r.ServiceReqDurationHistogram())
} }
if r.BackendOpenConnsGauge() != nil { if r.ServiceOpenConnsGauge() != nil {
backendOpenConnsGauge = append(backendOpenConnsGauge, r.BackendOpenConnsGauge()) serviceOpenConnsGauge = append(serviceOpenConnsGauge, r.ServiceOpenConnsGauge())
} }
if r.BackendRetriesCounter() != nil { if r.ServiceRetriesCounter() != nil {
backendRetriesCounter = append(backendRetriesCounter, r.BackendRetriesCounter()) serviceRetriesCounter = append(serviceRetriesCounter, r.ServiceRetriesCounter())
} }
if r.BackendServerUpGauge() != nil { if r.ServiceServerUpGauge() != nil {
backendServerUpGauge = append(backendServerUpGauge, r.BackendServerUpGauge()) serviceServerUpGauge = append(serviceServerUpGauge, r.ServiceServerUpGauge())
} }
} }
return &standardRegistry{ return &standardRegistry{
-		enabled:                      len(registries) > 0,
+		epEnabled:                    len(entryPointReqsCounter) > 0 || len(entryPointReqDurationHistogram) > 0 || len(entryPointOpenConnsGauge) > 0,
+		svcEnabled:                   len(serviceReqsCounter) > 0 || len(serviceReqDurationHistogram) > 0 || len(serviceOpenConnsGauge) > 0 || len(serviceRetriesCounter) > 0 || len(serviceServerUpGauge) > 0,
 		configReloadsCounter:         multi.NewCounter(configReloadsCounter...),
 		configReloadsFailureCounter:  multi.NewCounter(configReloadsFailureCounter...),
 		lastConfigReloadSuccessGauge: multi.NewGauge(lastConfigReloadSuccessGauge...),
 		lastConfigReloadFailureGauge: multi.NewGauge(lastConfigReloadFailureGauge...),
-		entrypointReqsCounter:          multi.NewCounter(entrypointReqsCounter...),
-		entrypointReqDurationHistogram: multi.NewHistogram(entrypointReqDurationHistogram...),
-		entrypointOpenConnsGauge:       multi.NewGauge(entrypointOpenConnsGauge...),
-		backendReqsCounter:             multi.NewCounter(backendReqsCounter...),
-		backendReqDurationHistogram:    multi.NewHistogram(backendReqDurationHistogram...),
-		backendOpenConnsGauge:          multi.NewGauge(backendOpenConnsGauge...),
-		backendRetriesCounter:          multi.NewCounter(backendRetriesCounter...),
-		backendServerUpGauge:           multi.NewGauge(backendServerUpGauge...),
+		entryPointReqsCounter:          multi.NewCounter(entryPointReqsCounter...),
+		entryPointReqDurationHistogram: multi.NewHistogram(entryPointReqDurationHistogram...),
+		entryPointOpenConnsGauge:       multi.NewGauge(entryPointOpenConnsGauge...),
+		serviceReqsCounter:             multi.NewCounter(serviceReqsCounter...),
+		serviceReqDurationHistogram:    multi.NewHistogram(serviceReqDurationHistogram...),
+		serviceOpenConnsGauge:          multi.NewGauge(serviceOpenConnsGauge...),
+		serviceRetriesCounter:          multi.NewCounter(serviceRetriesCounter...),
+		serviceServerUpGauge:           multi.NewGauge(serviceServerUpGauge...),
 	}
 }

 type standardRegistry struct {
-	enabled                        bool
+	epEnabled                      bool
+	svcEnabled                     bool
 	configReloadsCounter           metrics.Counter
 	configReloadsFailureCounter    metrics.Counter
 	lastConfigReloadSuccessGauge   metrics.Gauge
 	lastConfigReloadFailureGauge   metrics.Gauge
-	entrypointReqsCounter          metrics.Counter
-	entrypointReqDurationHistogram metrics.Histogram
-	entrypointOpenConnsGauge       metrics.Gauge
-	backendReqsCounter             metrics.Counter
-	backendReqDurationHistogram    metrics.Histogram
-	backendOpenConnsGauge          metrics.Gauge
-	backendRetriesCounter          metrics.Counter
-	backendServerUpGauge           metrics.Gauge
+	entryPointReqsCounter          metrics.Counter
+	entryPointReqDurationHistogram metrics.Histogram
+	entryPointOpenConnsGauge       metrics.Gauge
+	serviceReqsCounter             metrics.Counter
+	serviceReqDurationHistogram    metrics.Histogram
+	serviceOpenConnsGauge          metrics.Gauge
+	serviceRetriesCounter          metrics.Counter
+	serviceServerUpGauge           metrics.Gauge
 }

-func (r *standardRegistry) IsEnabled() bool {
-	return r.enabled
+func (r *standardRegistry) IsEpEnabled() bool {
+	return r.epEnabled
+}
+
+func (r *standardRegistry) IsSvcEnabled() bool {
+	return r.svcEnabled
 }

 func (r *standardRegistry) ConfigReloadsCounter() metrics.Counter {
@@ -144,34 +152,34 @@ func (r *standardRegistry) LastConfigReloadFailureGauge() metrics.Gauge {
 	return r.lastConfigReloadFailureGauge
 }

-func (r *standardRegistry) EntrypointReqsCounter() metrics.Counter {
-	return r.entrypointReqsCounter
+func (r *standardRegistry) EntryPointReqsCounter() metrics.Counter {
+	return r.entryPointReqsCounter
 }

-func (r *standardRegistry) EntrypointReqDurationHistogram() metrics.Histogram {
-	return r.entrypointReqDurationHistogram
+func (r *standardRegistry) EntryPointReqDurationHistogram() metrics.Histogram {
+	return r.entryPointReqDurationHistogram
 }

-func (r *standardRegistry) EntrypointOpenConnsGauge() metrics.Gauge {
-	return r.entrypointOpenConnsGauge
+func (r *standardRegistry) EntryPointOpenConnsGauge() metrics.Gauge {
+	return r.entryPointOpenConnsGauge
 }

-func (r *standardRegistry) BackendReqsCounter() metrics.Counter {
-	return r.backendReqsCounter
+func (r *standardRegistry) ServiceReqsCounter() metrics.Counter {
+	return r.serviceReqsCounter
 }

-func (r *standardRegistry) BackendReqDurationHistogram() metrics.Histogram {
-	return r.backendReqDurationHistogram
+func (r *standardRegistry) ServiceReqDurationHistogram() metrics.Histogram {
+	return r.serviceReqDurationHistogram
 }

-func (r *standardRegistry) BackendOpenConnsGauge() metrics.Gauge {
-	return r.backendOpenConnsGauge
+func (r *standardRegistry) ServiceOpenConnsGauge() metrics.Gauge {
+	return r.serviceOpenConnsGauge
 }

-func (r *standardRegistry) BackendRetriesCounter() metrics.Counter {
-	return r.backendRetriesCounter
+func (r *standardRegistry) ServiceRetriesCounter() metrics.Counter {
+	return r.serviceRetriesCounter
 }

-func (r *standardRegistry) BackendServerUpGauge() metrics.Gauge {
-	return r.backendServerUpGauge
+func (r *standardRegistry) ServiceServerUpGauge() metrics.Gauge {
+	return r.serviceServerUpGauge
 }
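Aside: the multi.NewCounter and multi.NewGauge calls above come from go-kit's multi package, which fans a single Add or Set out to every wrapped metric. A minimal, self-contained sketch of that behaviour using go-kit's in-memory generic metrics; everything in it is illustrative and not part of this commit:

    package main

    import (
        "fmt"

        "github.com/go-kit/kit/metrics/generic"
        "github.com/go-kit/kit/metrics/multi"
    )

    func main() {
        a := generic.NewCounter("a")
        b := generic.NewCounter("b")

        // One Add call is forwarded to every wrapped counter.
        c := multi.NewCounter(a, b)
        c.Add(1)

        fmt.Println(a.Value(), b.Value()) // 1 1
    }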


@@ -11,14 +11,14 @@ func TestNewMultiRegistry(t *testing.T) {
 	registries := []Registry{newCollectingRetryMetrics(), newCollectingRetryMetrics()}
 	registry := NewMultiRegistry(registries)

-	registry.BackendReqsCounter().With("key", "requests").Add(1)
-	registry.BackendReqDurationHistogram().With("key", "durations").Observe(2)
-	registry.BackendRetriesCounter().With("key", "retries").Add(3)
+	registry.ServiceReqsCounter().With("key", "requests").Add(1)
+	registry.ServiceReqDurationHistogram().With("key", "durations").Observe(2)
+	registry.ServiceRetriesCounter().With("key", "retries").Add(3)

 	for _, collectingRegistry := range registries {
-		cReqsCounter := collectingRegistry.BackendReqsCounter().(*counterMock)
-		cReqDurationHistogram := collectingRegistry.BackendReqDurationHistogram().(*histogramMock)
-		cRetriesCounter := collectingRegistry.BackendRetriesCounter().(*counterMock)
+		cReqsCounter := collectingRegistry.ServiceReqsCounter().(*counterMock)
+		cReqDurationHistogram := collectingRegistry.ServiceReqDurationHistogram().(*histogramMock)
+		cRetriesCounter := collectingRegistry.ServiceRetriesCounter().(*counterMock)

 		wantCounterValue := float64(1)
 		if cReqsCounter.counterValue != wantCounterValue {
@@ -41,9 +41,9 @@ func TestNewMultiRegistry(t *testing.T) {

 func newCollectingRetryMetrics() Registry {
 	return &standardRegistry{
-		backendReqsCounter:          &counterMock{},
-		backendReqDurationHistogram: &histogramMock{},
-		backendRetriesCounter:       &counterMock{},
+		serviceReqsCounter:          &counterMock{},
+		serviceReqDurationHistogram: &histogramMock{},
+		serviceRetriesCounter:       &counterMock{},
 	}
 }


@@ -2,6 +2,7 @@ package metrics

 import (
 	"context"
+	"fmt"
 	"net/http"
 	"sort"
 	"strings"
@@ -30,41 +31,43 @@ const (
 	// entry point
 	metricEntryPointPrefix    = MetricNamePrefix + "entrypoint_"
-	entrypointReqsTotalName   = metricEntryPointPrefix + "requests_total"
-	entrypointReqDurationName = metricEntryPointPrefix + "request_duration_seconds"
-	entrypointOpenConnsName   = metricEntryPointPrefix + "open_connections"
+	entryPointReqsTotalName   = metricEntryPointPrefix + "requests_total"
+	entryPointReqDurationName = metricEntryPointPrefix + "request_duration_seconds"
+	entryPointOpenConnsName   = metricEntryPointPrefix + "open_connections"

-	// backend level.
-	// MetricBackendPrefix prefix of all backend metric names
-	MetricBackendPrefix     = MetricNamePrefix + "backend_"
-	backendReqsTotalName    = MetricBackendPrefix + "requests_total"
-	backendReqDurationName  = MetricBackendPrefix + "request_duration_seconds"
-	backendOpenConnsName    = MetricBackendPrefix + "open_connections"
-	backendRetriesTotalName = MetricBackendPrefix + "retries_total"
-	backendServerUpName     = MetricBackendPrefix + "server_up"
+	// service level.
+	// MetricServicePrefix prefix of all service metric names
+	MetricServicePrefix     = MetricNamePrefix + "service_"
+	serviceReqsTotalName    = MetricServicePrefix + "requests_total"
+	serviceReqDurationName  = MetricServicePrefix + "request_duration_seconds"
+	serviceOpenConnsName    = MetricServicePrefix + "open_connections"
+	serviceRetriesTotalName = MetricServicePrefix + "retries_total"
+	serviceServerUpName     = MetricServicePrefix + "server_up"
 )
 // promState holds all metric state internally and acts as the only Collector we register for Prometheus.
 //
 // This enables control to remove metrics that belong to outdated configuration.
 // As an example why this is required, consider Traefik learns about a new service.
-// It populates the 'traefik_backend_server_up' metric for it with a value of 1 (alive).
-// When the backend is undeployed now the metric is still there in the client library
+// It populates the 'traefik_service_server_up' metric for it with a value of 1 (alive).
+// When the service is undeployed now the metric is still there in the client library
 // and will be returned on the metrics endpoint until Traefik would be restarted.
 //
 // To solve this problem promState keeps track of Traefik's dynamic configuration.
-// Metrics that "belong" to a dynamic configuration part like backends or entrypoints
+// Metrics that "belong" to a dynamic configuration part like services or entryPoints
 // are removed after they were scraped at least once when the corresponding object
 // doesn't exist anymore.
 var promState = newPrometheusState()

+var promRegistry = stdprometheus.NewRegistry()
+
 // PrometheusHandler exposes Prometheus routes.
 type PrometheusHandler struct{}

 // Append adds Prometheus routes on a router.
 func (h PrometheusHandler) Append(router *mux.Router) {
-	router.Methods(http.MethodGet).Path("/metrics").Handler(promhttp.Handler())
+	router.Methods(http.MethodGet).Path("/metrics").Handler(promhttp.HandlerFor(promRegistry, promhttp.HandlerOpts{}))
 }
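The switch from promhttp.Handler() to promhttp.HandlerFor is what ties the /metrics endpoint to the package-level promRegistry instead of the client library's global default registry. A minimal standalone sketch of that pattern, with invented names, just to show the shape of the API:

    package main

    import (
        "log"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // An isolated registry: nothing registered on the global default leaks in.
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewGoCollector())

        // HandlerFor serves only what was registered on reg.
        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }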
 // RegisterPrometheus registers all Prometheus metrics.
@@ -72,6 +75,17 @@ func (h PrometheusHandler) Append(router *mux.Router) {
 func RegisterPrometheus(ctx context.Context, config *types.Prometheus) Registry {
 	standardRegistry := initStandardRegistry(config)

+	if err := promRegistry.Register(stdprometheus.NewProcessCollector(stdprometheus.ProcessCollectorOpts{})); err != nil {
+		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
+			log.FromContext(ctx).Warn("ProcessCollector is already registered")
+		}
+	}
+
+	if err := promRegistry.Register(stdprometheus.NewGoCollector()); err != nil {
+		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
+			log.FromContext(ctx).Warn("GoCollector is already registered")
+		}
+	}
+
 	if !registerPromState(ctx) {
 		return nil
 	}
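The double registration guard above leans on the client library reporting a duplicate through a dedicated error type rather than a boolean. A hedged sketch of that check in isolation (the helper name is invented):

    import "github.com/prometheus/client_golang/prometheus"

    func registerOnce(reg *prometheus.Registry, c prometheus.Collector) error {
        if err := reg.Register(c); err != nil {
            // Registering the same collector twice is expected and harmless here.
            if _, ok := err.(prometheus.AlreadyRegisteredError); ok {
                return nil
            }
            return err // any other error is a real failure
        }
        return nil
    }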
@@ -106,76 +120,89 @@ func initStandardRegistry(config *types.Prometheus) Registry {
 		Help: "Last config reload failure",
 	}, []string{})

-	entrypointReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
-		Name: entrypointReqsTotalName,
-		Help: "How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.",
-	}, []string{"code", "method", "protocol", "entrypoint"})
-	entrypointReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
-		Name:    entrypointReqDurationName,
-		Help:    "How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.",
-		Buckets: buckets,
-	}, []string{"code", "method", "protocol", "entrypoint"})
-	entrypointOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
-		Name: entrypointOpenConnsName,
-		Help: "How many open connections exist on an entrypoint, partitioned by method and protocol.",
-	}, []string{"method", "protocol", "entrypoint"})
-	backendReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
-		Name: backendReqsTotalName,
-		Help: "How many HTTP requests processed on a backend, partitioned by status code, protocol, and method.",
-	}, []string{"code", "method", "protocol", "backend"})
-	backendReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
-		Name:    backendReqDurationName,
-		Help:    "How long it took to process the request on a backend, partitioned by status code, protocol, and method.",
-		Buckets: buckets,
-	}, []string{"code", "method", "protocol", "backend"})
-	backendOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
-		Name: backendOpenConnsName,
-		Help: "How many open connections exist on a backend, partitioned by method and protocol.",
-	}, []string{"method", "protocol", "backend"})
-	backendRetries := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
-		Name: backendRetriesTotalName,
-		Help: "How many request retries happened on a backend.",
-	}, []string{"backend"})
-	backendServerUp := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
-		Name: backendServerUpName,
-		Help: "Backend server is up, described by gauge value of 0 or 1.",
-	}, []string{"backend", "url"})
-
 	promState.describers = []func(chan<- *stdprometheus.Desc){
 		configReloads.cv.Describe,
 		configReloadsFailures.cv.Describe,
 		lastConfigReloadSuccess.gv.Describe,
 		lastConfigReloadFailure.gv.Describe,
-		entrypointReqs.cv.Describe,
-		entrypointReqDurations.hv.Describe,
-		entrypointOpenConns.gv.Describe,
-		backendReqs.cv.Describe,
-		backendReqDurations.hv.Describe,
-		backendOpenConns.gv.Describe,
-		backendRetries.cv.Describe,
-		backendServerUp.gv.Describe,
 	}

-	return &standardRegistry{
-		enabled:                      true,
+	reg := &standardRegistry{
+		epEnabled:                    config.AddEntryPointsLabels,
+		svcEnabled:                   config.AddServicesLabels,
 		configReloadsCounter:         configReloads,
 		configReloadsFailureCounter:  configReloadsFailures,
 		lastConfigReloadSuccessGauge: lastConfigReloadSuccess,
 		lastConfigReloadFailureGauge: lastConfigReloadFailure,
-		entrypointReqsCounter:          entrypointReqs,
-		entrypointReqDurationHistogram: entrypointReqDurations,
-		entrypointOpenConnsGauge:       entrypointOpenConns,
-		backendReqsCounter:             backendReqs,
-		backendReqDurationHistogram:    backendReqDurations,
-		backendOpenConnsGauge:          backendOpenConns,
-		backendRetriesCounter:          backendRetries,
-		backendServerUpGauge:           backendServerUp,
 	}

+	if config.AddEntryPointsLabels {
+		entryPointReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
+			Name: entryPointReqsTotalName,
+			Help: "How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.",
+		}, []string{"code", "method", "protocol", "entrypoint"})
+		entryPointReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
+			Name:    entryPointReqDurationName,
+			Help:    "How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.",
+			Buckets: buckets,
+		}, []string{"code", "method", "protocol", "entrypoint"})
+		entryPointOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
+			Name: entryPointOpenConnsName,
+			Help: "How many open connections exist on an entrypoint, partitioned by method and protocol.",
+		}, []string{"method", "protocol", "entrypoint"})
+
+		promState.describers = append(promState.describers, []func(chan<- *stdprometheus.Desc){
+			entryPointReqs.cv.Describe,
+			entryPointReqDurations.hv.Describe,
+			entryPointOpenConns.gv.Describe,
+		}...)
+
+		reg.entryPointReqsCounter = entryPointReqs
+		reg.entryPointReqDurationHistogram = entryPointReqDurations
+		reg.entryPointOpenConnsGauge = entryPointOpenConns
+	}
+
+	if config.AddServicesLabels {
+		serviceReqs := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
+			Name: serviceReqsTotalName,
+			Help: "How many HTTP requests processed on a service, partitioned by status code, protocol, and method.",
+		}, []string{"code", "method", "protocol", "service"})
+		serviceReqDurations := newHistogramFrom(promState.collectors, stdprometheus.HistogramOpts{
+			Name:    serviceReqDurationName,
+			Help:    "How long it took to process the request on a service, partitioned by status code, protocol, and method.",
+			Buckets: buckets,
+		}, []string{"code", "method", "protocol", "service"})
+		serviceOpenConns := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
+			Name: serviceOpenConnsName,
+			Help: "How many open connections exist on a service, partitioned by method and protocol.",
+		}, []string{"method", "protocol", "service"})
+		serviceRetries := newCounterFrom(promState.collectors, stdprometheus.CounterOpts{
+			Name: serviceRetriesTotalName,
+			Help: "How many request retries happened on a service.",
+		}, []string{"service"})
+		serviceServerUp := newGaugeFrom(promState.collectors, stdprometheus.GaugeOpts{
+			Name: serviceServerUpName,
+			Help: "Service server is up, described by gauge value of 0 or 1.",
+		}, []string{"service", "url"})
+
+		promState.describers = append(promState.describers, []func(chan<- *stdprometheus.Desc){
+			serviceReqs.cv.Describe,
+			serviceReqDurations.hv.Describe,
+			serviceOpenConns.gv.Describe,
+			serviceRetries.cv.Describe,
+			serviceServerUp.gv.Describe,
+		}...)
+
+		reg.serviceReqsCounter = serviceReqs
+		reg.serviceReqDurationHistogram = serviceReqDurations
+		reg.serviceOpenConnsGauge = serviceOpenConns
+		reg.serviceRetriesCounter = serviceRetries
+		reg.serviceServerUpGauge = serviceServerUp
+	}
+
+	return reg
 }

 func registerPromState(ctx context.Context) bool {
-	if err := stdprometheus.Register(promState); err != nil {
+	if err := promRegistry.Register(promState); err != nil {
 		logger := log.FromContext(ctx)
 		if _, ok := err.(stdprometheus.AlreadyRegisteredError); !ok {
 			logger.Errorf("Unable to register Traefik to Prometheus: %v", err)
@@ -189,24 +216,24 @@ func registerPromState(ctx context.Context) bool {
 // OnConfigurationUpdate receives the current configuration from Traefik.
 // It then converts the configuration to the optimized package internal format
 // and sets it to the promState.
-func OnConfigurationUpdate(configurations dynamic.Configurations) {
+func OnConfigurationUpdate(dynConf dynamic.Configurations, entryPoints []string) {
 	dynamicConfig := newDynamicConfig()

-	// FIXME metrics
-	// for _, config := range configurations {
-	// 	for _, frontend := range config.Frontends {
-	// 		for _, entrypointName := range frontend.EntryPoints {
-	// 			dynamicConfig.entrypoints[entrypointName] = true
-	// 		}
-	// 	}
-	//
-	// 	for backendName, backend := range config.Backends {
-	// 		dynamicConfig.backends[backendName] = make(map[string]bool)
-	// 		for _, server := range backend.Servers {
-	// 			dynamicConfig.backends[backendName][server.URL] = true
-	// 		}
-	// 	}
-	// }
+	for _, value := range entryPoints {
+		dynamicConfig.entryPoints[value] = true
+	}
+
+	for key, config := range dynConf {
+		for name := range config.HTTP.Routers {
+			dynamicConfig.routers[fmt.Sprintf("%s@%s", name, key)] = true
+		}
+
+		for serviceName, service := range config.HTTP.Services {
+			dynamicConfig.services[fmt.Sprintf("%s@%s", serviceName, key)] = make(map[string]bool)
+			for _, server := range service.LoadBalancer.Servers {
+				dynamicConfig.services[fmt.Sprintf("%s@%s", serviceName, key)][server.URL] = true
+			}
+		}
+	}

 	promState.SetDynamicConfig(dynamicConfig)
 }
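Note the key format: routers and services are stored under a composed "name@provider" key, so any later lookup must use the same composition. A tiny illustrative fragment (the provider and service names here are invented, not from this commit):

    // Provider key "docker", HTTP service "whoami", one load-balancer server.
    services := make(map[string]map[string]bool)
    key := fmt.Sprintf("%s@%s", "whoami", "docker") // "whoami@docker"
    services[key] = map[string]bool{"http://10.0.0.5:80": true}

A scraped metric carrying the label service="whoami@docker" therefore survives pruning, while a metric labeled with any service name absent from this map is treated as outdated.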
@@ -279,15 +306,15 @@ func (ps *prometheusState) Collect(ch chan<- stdprometheus.Metric) {
 func (ps *prometheusState) isOutdated(collector *collector) bool {
 	labels := collector.labels

-	if entrypointName, ok := labels["entrypoint"]; ok && !ps.dynamicConfig.hasEntrypoint(entrypointName) {
+	if entrypointName, ok := labels["entrypoint"]; ok && !ps.dynamicConfig.hasEntryPoint(entrypointName) {
 		return true
 	}

-	if backendName, ok := labels["backend"]; ok {
-		if !ps.dynamicConfig.hasBackend(backendName) {
+	if serviceName, ok := labels["service"]; ok {
+		if !ps.dynamicConfig.hasService(serviceName) {
 			return true
 		}
-		if url, ok := labels["url"]; ok && !ps.dynamicConfig.hasServerURL(backendName, url) {
+		if url, ok := labels["url"]; ok && !ps.dynamicConfig.hasServerURL(serviceName, url) {
 			return true
 		}
 	}
@@ -297,33 +324,35 @@ func (ps *prometheusState) isOutdated(collector *collector) bool {
 func newDynamicConfig() *dynamicConfig {
 	return &dynamicConfig{
-		entrypoints: make(map[string]bool),
-		backends:    make(map[string]map[string]bool),
+		entryPoints: make(map[string]bool),
+		routers:     make(map[string]bool),
+		services:    make(map[string]map[string]bool),
 	}
 }

-// dynamicConfig holds the current configuration for entrypoints, backends,
+// dynamicConfig holds the current configuration for entryPoints, services,
 // and server URLs in an optimized way to check for existence. This provides
 // a performant way to check whether the collected metrics belong to the
 // current configuration or to an outdated one.
 type dynamicConfig struct {
-	entrypoints map[string]bool
-	backends    map[string]map[string]bool
+	entryPoints map[string]bool
+	routers     map[string]bool
+	services    map[string]map[string]bool
 }

-func (d *dynamicConfig) hasEntrypoint(entrypointName string) bool {
-	_, ok := d.entrypoints[entrypointName]
+func (d *dynamicConfig) hasEntryPoint(entrypointName string) bool {
+	_, ok := d.entryPoints[entrypointName]
 	return ok
 }

-func (d *dynamicConfig) hasBackend(backendName string) bool {
-	_, ok := d.backends[backendName]
+func (d *dynamicConfig) hasService(serviceName string) bool {
+	_, ok := d.services[serviceName]
 	return ok
 }

-func (d *dynamicConfig) hasServerURL(backendName, serverURL string) bool {
-	if backend, hasBackend := d.backends[backendName]; hasBackend {
-		_, ok := backend[serverURL]
+func (d *dynamicConfig) hasServerURL(serviceName, serverURL string) bool {
+	if service, hasService := d.services[serviceName]; hasService {
+		_, ok := service[serverURL]
 		return ok
 	}
 	return false
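A quick in-package illustration of how these lookups behave; the test function and all values are invented for the example and are not part of the commit:

    func TestDynamicConfigLookups(t *testing.T) { // illustrative sketch only
        cfg := newDynamicConfig()
        cfg.entryPoints["web"] = true
        cfg.services["whoami@docker"] = map[string]bool{"http://10.0.0.5:80": true}

        if !cfg.hasEntryPoint("web") || !cfg.hasService("whoami@docker") {
            t.Fatal("expected configured names to be found")
        }
        // An unknown server URL marks the corresponding metric as outdated.
        if cfg.hasServerURL("whoami@docker", "http://10.0.0.9:80") {
            t.Fatal("unknown server URL should not be found")
        }
    }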
@@ -479,7 +508,7 @@ func (h *histogram) Observe(value float64) {
 	labels := h.labelNamesValues.ToLabels()
 	collector := h.hv.With(labels)
 	collector.Observe(value)
-	h.collectors <- newCollector(h.name, labels, collector, func() {
+	h.collectors <- newCollector(h.name, labels, h.hv, func() {
 		h.hv.Delete(labels)
 	})
 }
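The subtle point in this hunk is that the stored collector is now the whole h.hv vector rather than the value returned by With. Assuming prometheus/client_golang v1.x (which this commit's lock file pins), With on a histogram vector hands back only an Observer, while the vector itself is what implements prometheus.Collector. A sketch of the distinction, with invented names:

    hv := prometheus.NewHistogramVec(
        prometheus.HistogramOpts{Name: "demo_seconds"}, []string{"code"})

    var obs prometheus.Observer = hv.With(prometheus.Labels{"code": "200"})
    obs.Observe(0.42) // an Observer can only record values

    var coll prometheus.Collector = hv // the vector is what Describe/Collect run against
    _ = coll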


@@ -30,61 +30,61 @@ func TestRegisterPromState(t *testing.T) {
 		{
 			desc:            "Register once",
 			prometheusSlice: []*types.Prometheus{{}},
-			expectedNbRegistries: 1,
 			initPromState:   true,
-			unregisterPromState: false,
+			expectedNbRegistries: 1,
 		},
 		{
 			desc:            "Register once with no promState init",
 			prometheusSlice: []*types.Prometheus{{}},
-			expectedNbRegistries: 0,
-			unregisterPromState:  false,
+			initPromState:        false,
+			expectedNbRegistries: 1,
 		},
 		{
 			desc:            "Register twice",
 			prometheusSlice: []*types.Prometheus{{}, {}},
-			expectedNbRegistries: 2,
 			initPromState:   true,
-			unregisterPromState: false,
+			expectedNbRegistries: 2,
 		},
 		{
 			desc:            "Register twice with no promstate init",
 			prometheusSlice: []*types.Prometheus{{}, {}},
-			expectedNbRegistries: 0,
-			unregisterPromState:  false,
+			initPromState:        false,
+			expectedNbRegistries: 2,
 		},
 		{
 			desc:            "Register twice with unregister",
 			prometheusSlice: []*types.Prometheus{{}, {}},
+			initPromState:       true,
 			unregisterPromState: true,
 			expectedNbRegistries: 2,
-			initPromState:        true,
+		},
+		{
+			desc:                 "Register twice with unregister but no promstate init",
+			prometheusSlice:      []*types.Prometheus{{}, {}},
+			unregisterPromState:  true,
+			expectedNbRegistries: 0,
 		},
 	}

 	for _, test := range testCases {
+		test := test
+		t.Run(test.desc, func(t *testing.T) {
 			actualNbRegistries := 0
 			for _, prom := range test.prometheusSlice {
 				if test.initPromState {
 					initStandardRegistry(prom)
 				}

 				if registerPromState(context.Background()) {
 					actualNbRegistries++
 				}

 				if test.unregisterPromState {
-					prometheus.Unregister(promState)
+					promRegistry.Unregister(promState)
 				}

 				promState.reset()
 			}

-			prometheus.Unregister(promState)
+			promRegistry.Unregister(promState)
 			assert.Equal(t, test.expectedNbRegistries, actualNbRegistries)
+		})
 	}
 }
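The test := test line added before each t.Run is the standard Go guard against the loop variable being captured by reference: in Go versions before 1.22, every subtest closure would otherwise see the value from the final iteration. A minimal illustration of the idiom, not taken from this commit:

    for _, tc := range testCases {
        tc := tc // shadow the loop variable; each closure captures its own copy
        t.Run(tc.desc, func(t *testing.T) {
            // Without the shadowing above, a parallel or deferred use of tc
            // could observe a later iteration's value (pre-Go 1.22 semantics).
            _ = tc
        })
    }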
@@ -99,13 +99,15 @@ func (ps *prometheusState) reset() {
 }

 func TestPrometheus(t *testing.T) {
+	promState = newPrometheusState()
+	promRegistry = prometheus.NewRegistry()
 	// Reset state of global promState.
 	defer promState.reset()

-	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{})
-	defer prometheus.Unregister(promState)
+	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{AddEntryPointsLabels: true, AddServicesLabels: true})
+	defer promRegistry.Unregister(promState)

-	if !prometheusRegistry.IsEnabled() {
+	if !prometheusRegistry.IsEpEnabled() || !prometheusRegistry.IsSvcEnabled() {
 		t.Errorf("PrometheusRegistry should return true for IsEnabled()")
 	}
@@ -115,44 +117,44 @@ func TestPrometheus(t *testing.T) {
 	prometheusRegistry.LastConfigReloadFailureGauge().Set(float64(time.Now().Unix()))

 	prometheusRegistry.
-		EntrypointReqsCounter().
+		EntryPointReqsCounter().
 		With("code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http", "entrypoint", "http").
 		Add(1)
 	prometheusRegistry.
-		EntrypointReqDurationHistogram().
+		EntryPointReqDurationHistogram().
 		With("code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http", "entrypoint", "http").
 		Observe(1)
 	prometheusRegistry.
-		EntrypointOpenConnsGauge().
+		EntryPointOpenConnsGauge().
 		With("method", http.MethodGet, "protocol", "http", "entrypoint", "http").
 		Set(1)
 	prometheusRegistry.
-		BackendReqsCounter().
-		With("backend", "backend1", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
+		ServiceReqsCounter().
+		With("service", "service1", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
 		Add(1)
 	prometheusRegistry.
-		BackendReqDurationHistogram().
-		With("backend", "backend1", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
+		ServiceReqDurationHistogram().
+		With("service", "service1", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
 		Observe(10000)
 	prometheusRegistry.
-		BackendOpenConnsGauge().
-		With("backend", "backend1", "method", http.MethodGet, "protocol", "http").
+		ServiceOpenConnsGauge().
+		With("service", "service1", "method", http.MethodGet, "protocol", "http").
 		Set(1)
 	prometheusRegistry.
-		BackendRetriesCounter().
-		With("backend", "backend1").
+		ServiceRetriesCounter().
+		With("service", "service1").
 		Add(1)
 	prometheusRegistry.
-		BackendServerUpGauge().
-		With("backend", "backend1", "url", "http://127.0.0.10:80").
+		ServiceServerUpGauge().
+		With("service", "service1", "url", "http://127.0.0.10:80").
 		Set(1)

 	delayForTrackingCompletion()

 	metricsFamilies := mustScrape()

-	tests := []struct {
+	testCases := []struct {
 		name   string
 		labels map[string]string
 		assert func(*dto.MetricFamily)
@@ -174,86 +176,89 @@ func TestPrometheus(t *testing.T) {
 			assert: buildTimestampAssert(t, configLastReloadFailureName),
 		},
 		{
-			name: entrypointReqsTotalName,
+			name: entryPointReqsTotalName,
 			labels: map[string]string{
 				"code":       "200",
 				"method":     http.MethodGet,
 				"protocol":   "http",
 				"entrypoint": "http",
 			},
-			assert: buildCounterAssert(t, entrypointReqsTotalName, 1),
+			assert: buildCounterAssert(t, entryPointReqsTotalName, 1),
 		},
 		{
-			name: entrypointReqDurationName,
+			name: entryPointReqDurationName,
 			labels: map[string]string{
 				"code":       "200",
 				"method":     http.MethodGet,
 				"protocol":   "http",
 				"entrypoint": "http",
 			},
-			assert: buildHistogramAssert(t, entrypointReqDurationName, 1),
+			assert: buildHistogramAssert(t, entryPointReqDurationName, 1),
 		},
 		{
-			name: entrypointOpenConnsName,
+			name: entryPointOpenConnsName,
 			labels: map[string]string{
 				"method":     http.MethodGet,
 				"protocol":   "http",
 				"entrypoint": "http",
 			},
-			assert: buildGaugeAssert(t, entrypointOpenConnsName, 1),
+			assert: buildGaugeAssert(t, entryPointOpenConnsName, 1),
 		},
 		{
-			name: backendReqsTotalName,
+			name: serviceReqsTotalName,
 			labels: map[string]string{
 				"code":     "200",
 				"method":   http.MethodGet,
 				"protocol": "http",
-				"backend":  "backend1",
+				"service":  "service1",
 			},
-			assert: buildCounterAssert(t, backendReqsTotalName, 1),
+			assert: buildCounterAssert(t, serviceReqsTotalName, 1),
 		},
 		{
-			name: backendReqDurationName,
+			name: serviceReqDurationName,
 			labels: map[string]string{
 				"code":     "200",
 				"method":   http.MethodGet,
 				"protocol": "http",
-				"backend":  "backend1",
+				"service":  "service1",
 			},
-			assert: buildHistogramAssert(t, backendReqDurationName, 1),
+			assert: buildHistogramAssert(t, serviceReqDurationName, 1),
 		},
 		{
-			name: backendOpenConnsName,
+			name: serviceOpenConnsName,
 			labels: map[string]string{
 				"method":   http.MethodGet,
 				"protocol": "http",
-				"backend":  "backend1",
+				"service":  "service1",
 			},
-			assert: buildGaugeAssert(t, backendOpenConnsName, 1),
+			assert: buildGaugeAssert(t, serviceOpenConnsName, 1),
 		},
 		{
-			name: backendRetriesTotalName,
+			name: serviceRetriesTotalName,
 			labels: map[string]string{
-				"backend": "backend1",
+				"service": "service1",
 			},
-			assert: buildGreaterThanCounterAssert(t, backendRetriesTotalName, 1),
+			assert: buildGreaterThanCounterAssert(t, serviceRetriesTotalName, 1),
 		},
 		{
-			name: backendServerUpName,
+			name: serviceServerUpName,
 			labels: map[string]string{
-				"backend": "backend1",
+				"service": "service1",
 				"url":     "http://127.0.0.10:80",
 			},
-			assert: buildGaugeAssert(t, backendServerUpName, 1),
+			assert: buildGaugeAssert(t, serviceServerUpName, 1),
 		},
 	}

-	for _, test := range tests {
+	for _, test := range testCases {
+		test := test
+		t.Run(test.name, func(t *testing.T) {
 			family := findMetricFamily(test.name, metricsFamilies)
 			if family == nil {
 				t.Errorf("gathered metrics do not contain %q", test.name)
-				continue
+				return
 			}
 			for _, label := range family.Metric[0].Label {
 				val, ok := test.labels[*label.Name]
 				if !ok {
@@ -263,18 +268,19 @@ func TestPrometheus(t *testing.T) {
 				}
 			}

 			test.assert(family)
+		})
 	}
 }

 func TestPrometheusMetricRemoval(t *testing.T) {
-	// FIXME metrics
-	t.Skip("waiting for metrics")
+	promState = newPrometheusState()
+	promRegistry = prometheus.NewRegistry()
 	// Reset state of global promState.
 	defer promState.reset()

-	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{})
-	defer prometheus.Unregister(promState)
+	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{AddEntryPointsLabels: true, AddServicesLabels: true})
+	defer promRegistry.Unregister(promState)

 	configurations := make(dynamic.Configurations)
 	configurations["providerName"] = &dynamic.Configuration{
@@ -289,78 +295,78 @@ func TestPrometheusMetricRemoval(t *testing.T) {
 		),
 	}

-	OnConfigurationUpdate(configurations)
+	OnConfigurationUpdate(configurations, []string{"entrypoint1"})

 	// Register some metrics manually that are not part of the active configuration.
 	// Those metrics should be part of the /metrics output on the first scrape but
 	// should be removed after that scrape.
 	prometheusRegistry.
-		EntrypointReqsCounter().
+		EntryPointReqsCounter().
 		With("entrypoint", "entrypoint2", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
 		Add(1)
 	prometheusRegistry.
-		BackendReqsCounter().
-		With("backend", "backend2", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
+		ServiceReqsCounter().
+		With("service", "service2", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
 		Add(1)
 	prometheusRegistry.
-		BackendServerUpGauge().
-		With("backend", "backend1", "url", "http://localhost:9999").
+		ServiceServerUpGauge().
+		With("service", "service1", "url", "http://localhost:9999").
 		Set(1)

 	delayForTrackingCompletion()

-	assertMetricsExist(t, mustScrape(), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
-	assertMetricsAbsent(t, mustScrape(), entrypointReqsTotalName, backendReqsTotalName, backendServerUpName)
+	assertMetricsExist(t, mustScrape(), entryPointReqsTotalName, serviceReqsTotalName, serviceServerUpName)
+	assertMetricsAbsent(t, mustScrape(), entryPointReqsTotalName, serviceReqsTotalName, serviceServerUpName)

 	// To verify that metrics belonging to active configurations are not removed
 	// here the counter examples.
 	prometheusRegistry.
-		EntrypointReqsCounter().
+		EntryPointReqsCounter().
 		With("entrypoint", "entrypoint1", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet, "protocol", "http").
 		Add(1)

 	delayForTrackingCompletion()

-	assertMetricsExist(t, mustScrape(), entrypointReqsTotalName)
-	assertMetricsExist(t, mustScrape(), entrypointReqsTotalName)
+	assertMetricsExist(t, mustScrape(), entryPointReqsTotalName)
+	assertMetricsExist(t, mustScrape(), entryPointReqsTotalName)
 }
 func TestPrometheusRemovedMetricsReset(t *testing.T) {
 	// Reset state of global promState.
 	defer promState.reset()

-	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{})
-	defer prometheus.Unregister(promState)
+	prometheusRegistry := RegisterPrometheus(context.Background(), &types.Prometheus{AddEntryPointsLabels: true, AddServicesLabels: true})
+	defer promRegistry.Unregister(promState)

 	labelNamesValues := []string{
-		"backend", "backend",
+		"service", "service",
 		"code", strconv.Itoa(http.StatusOK),
 		"method", http.MethodGet,
 		"protocol", "http",
 	}
 	prometheusRegistry.
-		BackendReqsCounter().
+		ServiceReqsCounter().
 		With(labelNamesValues...).
 		Add(3)

 	delayForTrackingCompletion()

 	metricsFamilies := mustScrape()
-	assertCounterValue(t, 3, findMetricFamily(backendReqsTotalName, metricsFamilies), labelNamesValues...)
+	assertCounterValue(t, 3, findMetricFamily(serviceReqsTotalName, metricsFamilies), labelNamesValues...)

 	// There is no dynamic configuration and so this metric will be deleted
 	// after the first scrape.
-	assertMetricsAbsent(t, mustScrape(), backendReqsTotalName)
+	assertMetricsAbsent(t, mustScrape(), serviceReqsTotalName)

 	prometheusRegistry.
-		BackendReqsCounter().
+		ServiceReqsCounter().
 		With(labelNamesValues...).
 		Add(1)

 	delayForTrackingCompletion()

 	metricsFamilies = mustScrape()
-	assertCounterValue(t, 1, findMetricFamily(backendReqsTotalName, metricsFamilies), labelNamesValues...)
+	assertCounterValue(t, 1, findMetricFamily(serviceReqsTotalName, metricsFamilies), labelNamesValues...)
 }
 // Tracking and gathering the metrics happens concurrently.
@@ -374,7 +380,7 @@ func delayForTrackingCompletion() {
 }

 func mustScrape() []*dto.MetricFamily {
-	families, err := prometheus.DefaultGatherer.Gather()
+	families, err := promRegistry.Gather()
 	if err != nil {
 		panic(fmt.Sprintf("could not gather metrics families: %s", err))
 	}


@@ -19,18 +19,18 @@ var statsdClient = statsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...inte
 var statsdTicker *time.Ticker

 const (
-	statsdMetricsBackendReqsName    = "backend.request.total"
-	statsdMetricsBackendLatencyName = "backend.request.duration"
-	statsdRetriesTotalName          = "backend.retries.total"
+	statsdMetricsServiceReqsName    = "service.request.total"
+	statsdMetricsServiceLatencyName = "service.request.duration"
+	statsdRetriesTotalName          = "service.retries.total"
 	statsdConfigReloadsName           = "config.reload.total"
 	statsdConfigReloadsFailureName    = statsdConfigReloadsName + ".failure"
 	statsdLastConfigReloadSuccessName = "config.reload.lastSuccessTimestamp"
 	statsdLastConfigReloadFailureName = "config.reload.lastFailureTimestamp"
-	statsdEntrypointReqsName        = "entrypoint.request.total"
-	statsdEntrypointReqDurationName = "entrypoint.request.duration"
-	statsdEntrypointOpenConnsName   = "entrypoint.connections.open"
-	statsdOpenConnsName             = "backend.connections.open"
-	statsdServerUpName              = "backend.server.up"
+	statsdEntryPointReqsName        = "entrypoint.request.total"
+	statsdEntryPointReqDurationName = "entrypoint.request.duration"
+	statsdEntryPointOpenConnsName   = "entrypoint.connections.open"
+	statsdOpenConnsName             = "service.connections.open"
+	statsdServerUpName              = "service.server.up"
 )
 // RegisterStatsd registers the metrics pusher if this didn't happen yet and creates a statsd Registry instance.
@@ -39,21 +39,30 @@ func RegisterStatsd(ctx context.Context, config *types.Statsd) Registry {
 		statsdTicker = initStatsdTicker(ctx, config)
 	}

-	return &standardRegistry{
-		enabled:                      true,
+	registry := &standardRegistry{
 		configReloadsCounter:         statsdClient.NewCounter(statsdConfigReloadsName, 1.0),
 		configReloadsFailureCounter:  statsdClient.NewCounter(statsdConfigReloadsFailureName, 1.0),
 		lastConfigReloadSuccessGauge: statsdClient.NewGauge(statsdLastConfigReloadSuccessName),
 		lastConfigReloadFailureGauge: statsdClient.NewGauge(statsdLastConfigReloadFailureName),
-		entrypointReqsCounter:          statsdClient.NewCounter(statsdEntrypointReqsName, 1.0),
-		entrypointReqDurationHistogram: statsdClient.NewTiming(statsdEntrypointReqDurationName, 1.0),
-		entrypointOpenConnsGauge:       statsdClient.NewGauge(statsdEntrypointOpenConnsName),
-		backendReqsCounter:             statsdClient.NewCounter(statsdMetricsBackendReqsName, 1.0),
-		backendReqDurationHistogram:    statsdClient.NewTiming(statsdMetricsBackendLatencyName, 1.0),
-		backendRetriesCounter:          statsdClient.NewCounter(statsdRetriesTotalName, 1.0),
-		backendOpenConnsGauge:          statsdClient.NewGauge(statsdOpenConnsName),
-		backendServerUpGauge:           statsdClient.NewGauge(statsdServerUpName),
 	}

+	if config.AddEntryPointsLabels {
+		registry.epEnabled = config.AddEntryPointsLabels
+		registry.entryPointReqsCounter = statsdClient.NewCounter(statsdEntryPointReqsName, 1.0)
+		registry.entryPointReqDurationHistogram = statsdClient.NewTiming(statsdEntryPointReqDurationName, 1.0)
+		registry.entryPointOpenConnsGauge = statsdClient.NewGauge(statsdEntryPointOpenConnsName)
+	}
+
+	if config.AddServicesLabels {
+		registry.svcEnabled = config.AddServicesLabels
+		registry.serviceReqsCounter = statsdClient.NewCounter(statsdMetricsServiceReqsName, 1.0)
+		registry.serviceReqDurationHistogram = statsdClient.NewTiming(statsdMetricsServiceLatencyName, 1.0)
+		registry.serviceRetriesCounter = statsdClient.NewCounter(statsdRetriesTotalName, 1.0)
+		registry.serviceOpenConnsGauge = statsdClient.NewGauge(statsdOpenConnsName)
+		registry.serviceServerUpGauge = statsdClient.NewGauge(statsdServerUpName)
+	}
+
+	return registry
 }
 // initStatsdTicker initializes metrics pusher and creates a statsdClient if not created already
@@ -66,7 +75,7 @@ func initStatsdTicker(ctx context.Context, config *types.Statsd) *time.Ticker {
 	report := time.NewTicker(time.Duration(config.PushInterval))

 	safe.Go(func() {
-		statsdClient.SendLoop(report.C, "udp", address)
+		statsdClient.SendLoop(ctx, report.C, "udp", address)
 	})

 	return report
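The extra ctx argument matches go-kit v0.9.0 (which this commit's lock file moves to), where the statsd client's SendLoop gained a context parameter so the push loop can be stopped by cancellation. A hedged usage sketch; client is assumed to be a *statsd.Statsd created with statsd.New as above, and the address is invented:

    ctx, cancel := context.WithCancel(context.Background())
    ticker := time.NewTicker(10 * time.Second)

    go client.SendLoop(ctx, ticker.C, "udp", "localhost:8125")

    // During shutdown, cancelling the context ends the loop;
    // the ticker itself still has to be stopped separately.
    cancel()
    ticker.Stop()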


@@ -15,37 +15,37 @@ func TestStatsD(t *testing.T) {
 	// This is needed to make sure that UDP Listener listens for data a bit longer, otherwise it will quit after a millisecond
 	udp.Timeout = 5 * time.Second

-	statsdRegistry := RegisterStatsd(context.Background(), &types.Statsd{Address: ":18125", PushInterval: types.Duration(time.Second)})
+	statsdRegistry := RegisterStatsd(context.Background(), &types.Statsd{Address: ":18125", PushInterval: types.Duration(time.Second), AddEntryPointsLabels: true, AddServicesLabels: true})
 	defer StopStatsd()

-	if !statsdRegistry.IsEnabled() {
+	if !statsdRegistry.IsEpEnabled() || !statsdRegistry.IsSvcEnabled() {
 		t.Errorf("Statsd registry should return true for IsEnabled()")
 	}

 	expected := []string{
 		// We are only validating counts, as it is nearly impossible to validate latency, since it varies every run
-		"traefik.backend.request.total:2.000000|c\n",
-		"traefik.backend.retries.total:2.000000|c\n",
-		"traefik.backend.request.duration:10000.000000|ms",
+		"traefik.service.request.total:2.000000|c\n",
+		"traefik.service.retries.total:2.000000|c\n",
+		"traefik.service.request.duration:10000.000000|ms",
 		"traefik.config.reload.total:1.000000|c\n",
 		"traefik.config.reload.total:1.000000|c\n",
 		"traefik.entrypoint.request.total:1.000000|c\n",
 		"traefik.entrypoint.request.duration:10000.000000|ms",
 		"traefik.entrypoint.connections.open:1.000000|g\n",
-		"traefik.backend.server.up:1.000000|g\n",
+		"traefik.service.server.up:1.000000|g\n",
 	}

 	udp.ShouldReceiveAll(t, expected, func() {
-		statsdRegistry.BackendReqsCounter().With("service", "test", "code", string(http.StatusOK), "method", http.MethodGet).Add(1)
-		statsdRegistry.BackendReqsCounter().With("service", "test", "code", string(http.StatusNotFound), "method", http.MethodGet).Add(1)
-		statsdRegistry.BackendRetriesCounter().With("service", "test").Add(1)
-		statsdRegistry.BackendRetriesCounter().With("service", "test").Add(1)
-		statsdRegistry.BackendReqDurationHistogram().With("service", "test", "code", string(http.StatusOK)).Observe(10000)
+		statsdRegistry.ServiceReqsCounter().With("service", "test", "code", string(http.StatusOK), "method", http.MethodGet).Add(1)
+		statsdRegistry.ServiceReqsCounter().With("service", "test", "code", string(http.StatusNotFound), "method", http.MethodGet).Add(1)
+		statsdRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+		statsdRegistry.ServiceRetriesCounter().With("service", "test").Add(1)
+		statsdRegistry.ServiceReqDurationHistogram().With("service", "test", "code", string(http.StatusOK)).Observe(10000)
 		statsdRegistry.ConfigReloadsCounter().Add(1)
 		statsdRegistry.ConfigReloadsFailureCounter().Add(1)
-		statsdRegistry.EntrypointReqsCounter().With("entrypoint", "test").Add(1)
-		statsdRegistry.EntrypointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
-		statsdRegistry.EntrypointOpenConnsGauge().With("entrypoint", "test").Set(1)
-		statsdRegistry.BackendServerUpGauge().With("backend:test", "url", "http://127.0.0.1").Set(1)
+		statsdRegistry.EntryPointReqsCounter().With("entrypoint", "test").Add(1)
+		statsdRegistry.EntryPointReqDurationHistogram().With("entrypoint", "test").Observe(10000)
+		statsdRegistry.EntryPointOpenConnsGauge().With("entrypoint", "test").Set(1)
+		statsdRegistry.ServiceServerUpGauge().With("service:test", "url", "http://127.0.0.1").Set(1)
 	})
 }


@@ -0,0 +1,158 @@
package metrics
import (
"context"
"net/http"
"strconv"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
"github.com/containous/alice"
"github.com/containous/traefik/pkg/log"
"github.com/containous/traefik/pkg/metrics"
"github.com/containous/traefik/pkg/middlewares"
"github.com/containous/traefik/pkg/middlewares/retry"
gokitmetrics "github.com/go-kit/kit/metrics"
)
const (
protoHTTP = "http"
protoSSE = "sse"
protoWebsocket = "websocket"
typeName = "Metrics"
nameEntrypoint = "metrics-entrypoint"
nameService = "metrics-service"
)
type metricsMiddleware struct {
// Important: Since this int64 field is using sync/atomic, it has to be at the top of the struct due to a bug on 32-bit platforms
// See: https://golang.org/pkg/sync/atomic/ for more information
openConns int64
next http.Handler
reqsCounter gokitmetrics.Counter
reqDurationHistogram gokitmetrics.Histogram
openConnsGauge gokitmetrics.Gauge
baseLabels []string
}
// NewEntryPointMiddleware creates a new metrics middleware for an Entrypoint.
func NewEntryPointMiddleware(ctx context.Context, next http.Handler, registry metrics.Registry, entryPointName string) http.Handler {
middlewares.GetLogger(ctx, nameEntrypoint, typeName).Debug("Creating middleware")
return &metricsMiddleware{
next: next,
reqsCounter: registry.EntryPointReqsCounter(),
reqDurationHistogram: registry.EntryPointReqDurationHistogram(),
openConnsGauge: registry.EntryPointOpenConnsGauge(),
baseLabels: []string{"entrypoint", entryPointName},
}
}
// NewServiceMiddleware creates a new metrics middleware for a Service.
func NewServiceMiddleware(ctx context.Context, next http.Handler, registry metrics.Registry, serviceName string) http.Handler {
middlewares.GetLogger(ctx, nameService, typeName).Debug("Creating middleware")
return &metricsMiddleware{
next: next,
reqsCounter: registry.ServiceReqsCounter(),
reqDurationHistogram: registry.ServiceReqDurationHistogram(),
openConnsGauge: registry.ServiceOpenConnsGauge(),
baseLabels: []string{"service", serviceName},
}
}
// WrapEntryPointHandler Wraps metrics entrypoint to alice.Constructor.
func WrapEntryPointHandler(ctx context.Context, registry metrics.Registry, entryPointName string) alice.Constructor {
return func(next http.Handler) (http.Handler, error) {
return NewEntryPointMiddleware(ctx, next, registry, entryPointName), nil
}
}
// WrapServiceHandler Wraps metrics service to alice.Constructor.
func WrapServiceHandler(ctx context.Context, registry metrics.Registry, serviceName string) alice.Constructor {
return func(next http.Handler) (http.Handler, error) {
return NewServiceMiddleware(ctx, next, registry, serviceName), nil
}
}
func (m *metricsMiddleware) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
labels := []string{"method", getMethod(req), "protocol", getRequestProtocol(req)}
labels = append(labels, m.baseLabels...)
openConns := atomic.AddInt64(&m.openConns, 1)
m.openConnsGauge.With(labels...).Set(float64(openConns))
defer func(labelValues []string) {
openConns := atomic.AddInt64(&m.openConns, -1)
m.openConnsGauge.With(labelValues...).Set(float64(openConns))
}(labels)
start := time.Now()
recorder := &responseRecorder{rw, http.StatusOK}
m.next.ServeHTTP(recorder, req)
labels = append(labels, "code", strconv.Itoa(recorder.statusCode))
m.reqsCounter.With(labels...).Add(1)
m.reqDurationHistogram.With(labels...).Observe(time.Since(start).Seconds())
}
func getRequestProtocol(req *http.Request) string {
switch {
case isWebsocketRequest(req):
return protoWebsocket
case isSSERequest(req):
return protoSSE
default:
return protoHTTP
}
}
// isWebsocketRequest determines if the specified HTTP request is a websocket handshake request.
func isWebsocketRequest(req *http.Request) bool {
return containsHeader(req, "Connection", "upgrade") && containsHeader(req, "Upgrade", "websocket")
}
// isSSERequest determines if the specified HTTP request is a request for an event subscription.
func isSSERequest(req *http.Request) bool {
return containsHeader(req, "Accept", "text/event-stream")
}
func containsHeader(req *http.Request, name, value string) bool {
items := strings.Split(req.Header.Get(name), ",")
for _, item := range items {
if value == strings.ToLower(strings.TrimSpace(item)) {
return true
}
}
return false
}
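Note that containsHeader lowercases and trims each comma-separated item before comparing, which is why the websocket check above passes its expected values already in lowercase. An in-package illustration; the test function and header values are invented for the example:

    func TestIsWebsocketRequest(t *testing.T) { // illustrative sketch only
        req := httptest.NewRequest(http.MethodGet, "/", nil)
        req.Header.Set("Connection", "keep-alive, Upgrade")
        req.Header.Set("Upgrade", "websocket")

        // " Upgrade" is trimmed and lowercased to "upgrade", so it matches.
        if !isWebsocketRequest(req) {
            t.Fatal("expected request to be detected as a websocket handshake")
        }
    }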
func getMethod(r *http.Request) string {
if !utf8.ValidString(r.Method) {
log.Warnf("Invalid HTTP method encoding: %s", r.Method)
return "NON_UTF8_HTTP_METHOD"
}
return r.Method
}
type retryMetrics interface {
ServiceRetriesCounter() gokitmetrics.Counter
}
// NewRetryListener instantiates a MetricsRetryListener with the given retryMetrics.
func NewRetryListener(retryMetrics retryMetrics, serviceName string) retry.Listener {
return &RetryListener{retryMetrics: retryMetrics, serviceName: serviceName}
}
// RetryListener is an implementation of the RetryListener interface to
// record RequestMetrics about retry attempts.
type RetryListener struct {
retryMetrics retryMetrics
serviceName string
}
// Retried tracks the retry in the RequestMetrics implementation.
func (m *RetryListener) Retried(req *http.Request, attempt int) {
m.retryMetrics.ServiceRetriesCounter().With("service", m.serviceName).Add(1)
}
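To see how these constructors are meant to be wired together, here is a hedged sketch of attaching the entry-point middleware to a handler chain with containous/alice; ctx, registry, finalHandler, and the entry-point name "web" are placeholders, not values from this commit:

    // Illustrative wiring only.
    chain := alice.New(WrapEntryPointHandler(ctx, registry, "web"))

    handler, err := chain.Then(finalHandler)
    if err != nil {
        return err
    }
    // handler now records request count, duration, and open connections
    // labeled entrypoint="web" before delegating to finalHandler.
    _ = handler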


@@ -0,0 +1,58 @@
package metrics
import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
"github.com/go-kit/kit/metrics"
)
// CollectingCounter is a metrics.Counter implementation that enables access to the CounterValue and LastLabelValues.
type CollectingCounter struct {
CounterValue float64
LastLabelValues []string
}
// With is there to satisfy the metrics.Counter interface.
func (c *CollectingCounter) With(labelValues ...string) metrics.Counter {
c.LastLabelValues = labelValues
return c
}
// Add is there to satisfy the metrics.Counter interface.
func (c *CollectingCounter) Add(delta float64) {
c.CounterValue += delta
}
func TestMetricsRetryListener(t *testing.T) {
req := httptest.NewRequest(http.MethodGet, "/", nil)
retryMetrics := newCollectingRetryMetrics()
retryListener := NewRetryListener(retryMetrics, "serviceName")
retryListener.Retried(req, 1)
retryListener.Retried(req, 2)
wantCounterValue := float64(2)
if retryMetrics.retriesCounter.CounterValue != wantCounterValue {
t.Errorf("got counter value of %f, want %f", retryMetrics.retriesCounter.CounterValue, wantCounterValue)
}
wantLabelValues := []string{"service", "serviceName"}
if !reflect.DeepEqual(retryMetrics.retriesCounter.LastLabelValues, wantLabelValues) {
t.Errorf("wrong label values %v used, want %v", retryMetrics.retriesCounter.LastLabelValues, wantLabelValues)
}
}
// collectingRetryMetrics is an implementation of the retryMetrics interface that can be used inside tests to collect the times Add() was called.
type collectingRetryMetrics struct {
retriesCounter *CollectingCounter
}
func newCollectingRetryMetrics() *collectingRetryMetrics {
return &collectingRetryMetrics{retriesCounter: &CollectingCounter{}}
}
func (m *collectingRetryMetrics) ServiceRetriesCounter() metrics.Counter {
return m.retriesCounter
}


@@ -0,0 +1,37 @@
package metrics
import (
"bufio"
"net"
"net/http"
)
// responseRecorder captures information from the response and preserves it for
// later analysis.
type responseRecorder struct {
http.ResponseWriter
statusCode int
}
// WriteHeader captures the status code for later retrieval.
func (r *responseRecorder) WriteHeader(status int) {
r.ResponseWriter.WriteHeader(status)
r.statusCode = status
}
// Hijack hijacks the connection
func (r *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return r.ResponseWriter.(http.Hijacker).Hijack()
}
// CloseNotify returns a channel that receives at most a
// single value (true) when the client connection has gone
// away.
func (r *responseRecorder) CloseNotify() <-chan bool {
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Flush sends any buffered data to the client.
func (r *responseRecorder) Flush() {
r.ResponseWriter.(http.Flusher).Flush()
}


@@ -306,7 +306,7 @@ func TestRouterManager_Get(t *testing.T) {
 			Middlewares: test.middlewaresConfig,
 		},
 	})

-	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport)
+	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport, nil)
 	middlewaresBuilder := middleware.NewBuilder(rtConf.Middlewares, serviceManager)
 	responseModifierFactory := responsemodifiers.NewBuilder(rtConf.Middlewares)
 	routerManager := NewManager(rtConf, serviceManager, middlewaresBuilder, responseModifierFactory)
@@ -407,7 +407,7 @@ func TestAccessLog(t *testing.T) {
 			Middlewares: test.middlewaresConfig,
 		},
 	})

-	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport)
+	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport, nil)
 	middlewaresBuilder := middleware.NewBuilder(rtConf.Middlewares, serviceManager)
 	responseModifierFactory := responsemodifiers.NewBuilder(rtConf.Middlewares)
 	routerManager := NewManager(rtConf, serviceManager, middlewaresBuilder, responseModifierFactory)
@@ -693,7 +693,7 @@ func TestRuntimeConfiguration(t *testing.T) {
 			Middlewares: test.middlewareConfig,
 		},
 	})

-	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport)
+	serviceManager := service.NewManager(rtConf.Services, http.DefaultTransport, nil)
 	middlewaresBuilder := middleware.NewBuilder(rtConf.Middlewares, serviceManager)
 	responseModifierFactory := responsemodifiers.NewBuilder(map[string]*runtime.MiddlewareInfo{})
 	routerManager := NewManager(rtConf, serviceManager, middlewaresBuilder, responseModifierFactory)
@@ -767,7 +767,7 @@ func BenchmarkRouterServe(b *testing.B) {
 			Middlewares: map[string]*dynamic.Middleware{},
 		},
 	})

-	serviceManager := service.NewManager(rtConf.Services, &staticTransport{res})
+	serviceManager := service.NewManager(rtConf.Services, &staticTransport{res}, nil)
 	middlewaresBuilder := middleware.NewBuilder(rtConf.Middlewares, serviceManager)
 	responseModifierFactory := responsemodifiers.NewBuilder(rtConf.Middlewares)
 	routerManager := NewManager(rtConf, serviceManager, middlewaresBuilder, responseModifierFactory)
@@ -808,7 +808,7 @@ func BenchmarkService(b *testing.B) {
 			Services: serviceConfig,
 		},
 	})

-	serviceManager := service.NewManager(rtConf.Services, &staticTransport{res})
+	serviceManager := service.NewManager(rtConf.Services, &staticTransport{res}, nil)

 	w := httptest.NewRecorder()
 	req := testhelpers.MustNewRequest(http.MethodGet, "http://foo.bar/", nil)


@@ -12,7 +12,9 @@ import (
 	"github.com/containous/traefik/pkg/config/dynamic"
 	"github.com/containous/traefik/pkg/config/runtime"
 	"github.com/containous/traefik/pkg/log"
+	"github.com/containous/traefik/pkg/metrics"
 	"github.com/containous/traefik/pkg/middlewares/accesslog"
+	metricsmiddleware "github.com/containous/traefik/pkg/middlewares/metrics"
 	"github.com/containous/traefik/pkg/middlewares/requestdecorator"
 	"github.com/containous/traefik/pkg/middlewares/tracing"
 	"github.com/containous/traefik/pkg/responsemodifiers"
@@ -49,7 +51,13 @@ func (s *Server) loadConfiguration(configMsg dynamic.Message) {
 		listener(*configMsg.Configuration)
 	}

-	s.postLoadConfiguration()
+	if s.metricsRegistry.IsEpEnabled() || s.metricsRegistry.IsSvcEnabled() {
+		var entrypoints []string
+		for key := range s.entryPointsTCP {
+			entrypoints = append(entrypoints, key)
+		}
+		metrics.OnConfigurationUpdate(newConfigurations, entrypoints)
+	}
 }

 // loadConfigurationTCP returns a new gorilla.mux Route from the specified global configuration and the dynamic
@@ -89,7 +97,7 @@ func (s *Server) createTCPRouters(ctx context.Context, configuration *runtime.Co
 // createHTTPHandlers returns, for the given configuration and entryPoints, the HTTP handlers for non-TLS connections, and for the TLS ones. the given configuration must not be nil. its fields will get mutated.
 func (s *Server) createHTTPHandlers(ctx context.Context, configuration *runtime.Configuration, entryPoints []string) (map[string]http.Handler, map[string]http.Handler) {
-	serviceManager := service.NewManager(configuration.Services, s.defaultRoundTripper)
+	serviceManager := service.NewManager(configuration.Services, s.defaultRoundTripper, s.metricsRegistry)
 	middlewaresBuilder := middleware.NewBuilder(configuration.Middlewares, serviceManager)
 	responseModifierFactory := responsemodifiers.NewBuilder(configuration.Middlewares)
 	routerManager := router.NewManager(configuration, serviceManager, middlewaresBuilder, responseModifierFactory)
@@ -128,6 +136,10 @@ func (s *Server) createHTTPHandlers(ctx context.Context, configuration *runtime.
 			chain = chain.Append(tracing.WrapEntryPointHandler(ctx, s.tracer, entryPointName))
 		}

+		if s.metricsRegistry.IsEpEnabled() {
+			chain = chain.Append(metricsmiddleware.WrapEntryPointHandler(ctx, s.metricsRegistry, entryPointName))
+		}
+
 		chain = chain.Append(requestdecorator.WrapHandler(s.requestDecorator))

 		handler, err := chain.Then(internalMuxRouter.NotFoundHandler)
@@ -266,15 +278,6 @@ func (s *Server) throttleProviderConfigReload(throttle time.Duration, publish ch
 	}
 }

-func (s *Server) postLoadConfiguration() {
-	// FIXME metrics
-	// if s.metricsRegistry.IsEnabled() {
-	// 	activeConfig := s.currentConfigurations.Get().(config.Configurations)
-	// 	metrics.OnConfigurationUpdate(activeConfig)
-	// }
-}
-
 func buildDefaultHTTPRouter() *mux.Router {
 	rt := mux.NewRouter()
 	rt.NotFoundHandler = http.HandlerFunc(http.NotFound)
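The conditional Append calls above follow a common middleware-chain pattern: build the chain, add optional wrappers only when enabled, then materialize the final handler. A self-contained sketch of the same idea without the alice helper (all names here are invented for illustration, not Traefik's):

type constructor func(http.Handler) http.Handler

func buildChain(final http.Handler, epMetricsEnabled bool) http.Handler {
	var chain []constructor
	if epMetricsEnabled {
		chain = append(chain, func(next http.Handler) http.Handler {
			// stand-in for the entry-point metrics wrapper used above
			return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
				next.ServeHTTP(rw, req)
			})
		})
	}
	h := final
	for i := len(chain) - 1; i >= 0; i-- { // apply in reverse so the first appended runs first
		h = chain[i](h)
	}
	return h
}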


@@ -13,8 +13,10 @@ import (
 	"github.com/containous/traefik/pkg/config/runtime"
 	"github.com/containous/traefik/pkg/healthcheck"
 	"github.com/containous/traefik/pkg/log"
+	"github.com/containous/traefik/pkg/metrics"
 	"github.com/containous/traefik/pkg/middlewares/accesslog"
 	"github.com/containous/traefik/pkg/middlewares/emptybackendhandler"
+	metricsMiddle "github.com/containous/traefik/pkg/middlewares/metrics"
 	"github.com/containous/traefik/pkg/middlewares/pipelining"
 	"github.com/containous/traefik/pkg/server/cookie"
 	"github.com/containous/traefik/pkg/server/internal"
@@ -27,8 +29,9 @@ const (
 )

 // NewManager creates a new Manager
-func NewManager(configs map[string]*runtime.ServiceInfo, defaultRoundTripper http.RoundTripper) *Manager {
+func NewManager(configs map[string]*runtime.ServiceInfo, defaultRoundTripper http.RoundTripper, metricsRegistry metrics.Registry) *Manager {
 	return &Manager{
+		metricsRegistry:     metricsRegistry,
 		bufferPool:          newBufferPool(),
 		defaultRoundTripper: defaultRoundTripper,
 		balancers:           make(map[string][]healthcheck.BalancerHandler),
@@ -38,6 +41,7 @@ func NewManager(configs map[string]*runtime.ServiceInfo, defaultRoundTripper htt

 // Manager The service manager
 type Manager struct {
+	metricsRegistry     metrics.Registry
 	bufferPool          httputil.BufferPool
 	defaultRoundTripper http.RoundTripper
 	balancers           map[string][]healthcheck.BalancerHandler
@@ -87,8 +91,12 @@ func (m *Manager) getLoadBalancerServiceHandler(
 	alHandler := func(next http.Handler) (http.Handler, error) {
 		return accesslog.NewFieldHandler(next, accesslog.ServiceName, serviceName, accesslog.AddServiceFields), nil
 	}

+	chain := alice.New()
+	if m.metricsRegistry != nil && m.metricsRegistry.IsSvcEnabled() {
+		chain = chain.Append(metricsMiddle.WrapServiceHandler(ctx, m.metricsRegistry, serviceName))
+	}
+
-	handler, err := alice.New().Append(alHandler).Then(pipelining.New(ctx, fwd, "pipelining"))
+	handler, err := chain.Append(alHandler).Then(pipelining.New(ctx, fwd, "pipelining"))
 	if err != nil {
 		return nil, err
 	}
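Note the nil guard on metricsRegistry above: callers that do not need metrics can simply pass nil for the new parameter, which is exactly what the tests below do, e.g.:

	sm := service.NewManager(configs, http.DefaultTransport, nil) // nil registry: the metrics middleware is skipped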


@@ -80,7 +80,7 @@ func TestGetLoadBalancer(t *testing.T) {
 }

 func TestGetLoadBalancerServiceHandler(t *testing.T) {
-	sm := NewManager(nil, http.DefaultTransport)
+	sm := NewManager(nil, http.DefaultTransport, nil)

 	server1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set("X-From", "first")
@@ -332,7 +332,7 @@ func TestManager_Build(t *testing.T) {
 		t.Run(test.desc, func(t *testing.T) {
 			t.Parallel()

-			manager := NewManager(test.configs, http.DefaultTransport)
+			manager := NewManager(test.configs, http.DefaultTransport, nil)

 			ctx := context.Background()
 			if len(test.providerName) > 0 {


@@ -4,7 +4,7 @@ import (
 	"time"
 )

-// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems
+// Metrics provides options to expose and send Traefik metrics to different third party monitoring systems.
 type Metrics struct {
 	Prometheus *Prometheus `description:"Prometheus metrics exporter type." json:"prometheus,omitempty" toml:"prometheus,omitempty" yaml:"prometheus,omitempty" export:"true" label:"allowEmpty"`
 	DataDog    *DataDog    `description:"DataDog metrics exporter type." json:"dataDog,omitempty" toml:"dataDog,omitempty" yaml:"dataDog,omitempty" export:"true" label:"allowEmpty"`
@@ -12,44 +12,56 @@ type Metrics struct {
 	InfluxDB *InfluxDB `description:"InfluxDB metrics exporter type." json:"influxDB,omitempty" toml:"influxDB,omitempty" yaml:"influxDB,omitempty" label:"allowEmpty"`
 }

-// Prometheus can contain specific configuration used by the Prometheus Metrics exporter
+// Prometheus can contain specific configuration used by the Prometheus Metrics exporter.
 type Prometheus struct {
 	Buckets     []float64 `description:"Buckets for latency metrics." json:"buckets,omitempty" toml:"buckets,omitempty" yaml:"buckets,omitempty" export:"true"`
 	EntryPoint  string    `description:"EntryPoint." json:"entryPoint,omitempty" toml:"entryPoint,omitempty" yaml:"entryPoint,omitempty" export:"true"`
 	Middlewares []string  `description:"Middlewares." json:"middlewares,omitempty" toml:"middlewares,omitempty" yaml:"middlewares,omitempty" export:"true"`
+	AddEntryPointsLabels bool `description:"Enable metrics on entry points." json:"addEntryPointsLabels,omitempty" toml:"addEntryPointsLabels,omitempty" yaml:"addEntryPointsLabels,omitempty" export:"true"`
+	AddServicesLabels    bool `description:"Enable metrics on services." json:"addServicesLabels,omitempty" toml:"addServicesLabels,omitempty" yaml:"addServicesLabels,omitempty" export:"true"`
 }

 // SetDefaults sets the default values.
 func (p *Prometheus) SetDefaults() {
 	p.Buckets = []float64{0.1, 0.3, 1.2, 5}
 	p.EntryPoint = "traefik"
+	p.AddEntryPointsLabels = true
+	p.AddServicesLabels = true
 }

-// DataDog contains address and metrics pushing interval configuration
+// DataDog contains address and metrics pushing interval configuration.
 type DataDog struct {
 	Address      string   `description:"DataDog's address." json:"address,omitempty" toml:"address,omitempty" yaml:"address,omitempty"`
 	PushInterval Duration `description:"DataDog push interval." json:"pushInterval,omitempty" toml:"pushInterval,omitempty" yaml:"pushInterval,omitempty" export:"true"`
+	AddEntryPointsLabels bool `description:"Enable metrics on entry points." json:"addEntryPointsLabels,omitempty" toml:"addEntryPointsLabels,omitempty" yaml:"addEntryPointsLabels,omitempty" export:"true"`
+	AddServicesLabels    bool `description:"Enable metrics on services." json:"addServicesLabels,omitempty" toml:"addServicesLabels,omitempty" yaml:"addServicesLabels,omitempty" export:"true"`
 }

 // SetDefaults sets the default values.
 func (d *DataDog) SetDefaults() {
 	d.Address = "localhost:8125"
 	d.PushInterval = Duration(10 * time.Second)
+	d.AddEntryPointsLabels = true
+	d.AddServicesLabels = true
 }

-// Statsd contains address and metrics pushing interval configuration
+// Statsd contains address and metrics pushing interval configuration.
 type Statsd struct {
 	Address      string   `description:"StatsD address." json:"address,omitempty" toml:"address,omitempty" yaml:"address,omitempty"`
 	PushInterval Duration `description:"StatsD push interval." json:"pushInterval,omitempty" toml:"pushInterval,omitempty" yaml:"pushInterval,omitempty" export:"true"`
+	AddEntryPointsLabels bool `description:"Enable metrics on entry points." json:"addEntryPointsLabels,omitempty" toml:"addEntryPointsLabels,omitempty" yaml:"addEntryPointsLabels,omitempty" export:"true"`
+	AddServicesLabels    bool `description:"Enable metrics on services." json:"addServicesLabels,omitempty" toml:"addServicesLabels,omitempty" yaml:"addServicesLabels,omitempty" export:"true"`
 }

 // SetDefaults sets the default values.
 func (s *Statsd) SetDefaults() {
 	s.Address = "localhost:8125"
 	s.PushInterval = Duration(10 * time.Second)
+	s.AddEntryPointsLabels = true
+	s.AddServicesLabels = true
 }

-// InfluxDB contains address, login and metrics pushing interval configuration
+// InfluxDB contains address, login and metrics pushing interval configuration.
 type InfluxDB struct {
 	Address  string `description:"InfluxDB address." json:"address,omitempty" toml:"address,omitempty" yaml:"address,omitempty"`
 	Protocol string `description:"InfluxDB address protocol (udp or http)." json:"protocol,omitempty" toml:"protocol,omitempty" yaml:"protocol,omitempty"`
@@ -58,6 +70,8 @@ type InfluxDB struct {
 	RetentionPolicy string `description:"InfluxDB retention policy used when protocol is http." json:"retentionPolicy,omitempty" toml:"retentionPolicy,omitempty" yaml:"retentionPolicy,omitempty" export:"true"`
 	Username        string `description:"InfluxDB username (only with http)." json:"username,omitempty" toml:"username,omitempty" yaml:"username,omitempty" export:"true"`
 	Password        string `description:"InfluxDB password (only with http)." json:"password,omitempty" toml:"password,omitempty" yaml:"password,omitempty" export:"true"`
+	AddEntryPointsLabels bool `description:"Enable metrics on entry points." json:"addEntryPointsLabels,omitempty" toml:"addEntryPointsLabels,omitempty" yaml:"addEntryPointsLabels,omitempty" export:"true"`
+	AddServicesLabels    bool `description:"Enable metrics on services." json:"addServicesLabels,omitempty" toml:"addServicesLabels,omitempty" yaml:"addServicesLabels,omitempty" export:"true"`
 }

 // SetDefaults sets the default values.
@@ -65,9 +79,11 @@ func (i *InfluxDB) SetDefaults() {
 	i.Address = "localhost:8089"
 	i.Protocol = "udp"
 	i.PushInterval = Duration(10 * time.Second)
+	i.AddEntryPointsLabels = true
+	i.AddServicesLabels = true
 }

-// Statistics provides options for monitoring request and response stats
+// Statistics provides options for monitoring request and response stats.
 type Statistics struct {
 	RecentErrors int `description:"Number of recent errors logged." json:"recentErrors,omitempty" toml:"recentErrors,omitempty" yaml:"recentErrors,omitempty" export:"true"`
 }
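As a usage sketch, the new flags default to true via SetDefaults and can be overridden individually. The package qualifier "types" below is an assumption from context, not confirmed by this diff:

	prom := &types.Prometheus{}
	prom.SetDefaults()             // buckets, entry point, and both Add*Labels flags set to true
	prom.AddServicesLabels = false // e.g. keep only entry-point level metrics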


@@ -1,9 +1,10 @@
 package log

 import (
+	"runtime"
+	"strconv"
+	"strings"
 	"time"
-
-	"github.com/go-stack/stack"
 )

 // A Valuer generates a log value. When passed to With or WithPrefix in a
@@ -81,7 +82,14 @@ func (tf timeFormat) MarshalText() (text []byte, err error) {
 // Caller returns a Valuer that returns a file and line from a specified depth
 // in the callstack. Users will probably want to use DefaultCaller.
 func Caller(depth int) Valuer {
-	return func() interface{} { return stack.Caller(depth) }
+	return func() interface{} {
+		_, file, line, _ := runtime.Caller(depth)
+
+		idx := strings.LastIndexByte(file, '/')
+		// using idx+1 below handles both of following cases:
+		// idx == -1 because no "/" was found, or
+		// idx >= 0 and we want to start at the character after the found "/".
+		return file[idx+1:] + ":" + strconv.Itoa(line)
+	}
 }

 var (
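As a usage note, such a Valuer is normally attached with With; a minimal sketch using the public go-kit log API (DefaultCaller is the package's ready-made Caller at the standard depth):

	logger := log.NewLogfmtLogger(os.Stderr)
	logger = log.With(logger, "caller", log.DefaultCaller)
	logger.Log("msg", "hello") // emits something like: caller=main.go:12 msg=hello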


@@ -11,8 +11,10 @@
 package dogstatsd

 import (
+	"context"
 	"fmt"
 	"io"
+	"math/rand"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -72,7 +74,7 @@ func (d *Dogstatsd) NewCounter(name string, sampleRate float64) *Counter {
 	d.rates.Set(name, sampleRate)
 	return &Counter{
 		name: name,
-		obs:  d.counters.Observe,
+		obs:  sampleObservations(d.counters.Observe, sampleRate),
 	}
 }
@@ -94,7 +96,7 @@ func (d *Dogstatsd) NewTiming(name string, sampleRate float64) *Timing {
 	d.rates.Set(name, sampleRate)
 	return &Timing{
 		name: name,
-		obs:  d.timings.Observe,
+		obs:  sampleObservations(d.timings.Observe, sampleRate),
 	}
 }
@@ -104,29 +106,34 @@ func (d *Dogstatsd) NewHistogram(name string, sampleRate float64) *Histogram {
 	d.rates.Set(name, sampleRate)
 	return &Histogram{
 		name: name,
-		obs:  d.histograms.Observe,
+		obs:  sampleObservations(d.histograms.Observe, sampleRate),
 	}
 }

 // WriteLoop is a helper method that invokes WriteTo to the passed writer every
-// time the passed channel fires. This method blocks until the channel is
-// closed, so clients probably want to run it in its own goroutine. For typical
+// time the passed channel fires. This method blocks until ctx is canceled,
+// so clients probably want to run it in its own goroutine. For typical
 // usage, create a time.Ticker and pass its C channel to this method.
-func (d *Dogstatsd) WriteLoop(c <-chan time.Time, w io.Writer) {
-	for range c {
-		if _, err := d.WriteTo(w); err != nil {
-			d.logger.Log("during", "WriteTo", "err", err)
+func (d *Dogstatsd) WriteLoop(ctx context.Context, c <-chan time.Time, w io.Writer) {
+	for {
+		select {
+		case <-c:
+			if _, err := d.WriteTo(w); err != nil {
+				d.logger.Log("during", "WriteTo", "err", err)
+			}
+		case <-ctx.Done():
+			return
 		}
 	}
 }

 // SendLoop is a helper method that wraps WriteLoop, passing a managed
 // connection to the network and address. Like WriteLoop, this method blocks
-// until the channel is closed, so clients probably want to start it in its own
+// until ctx is canceled, so clients probably want to start it in its own
 // goroutine. For typical usage, create a time.Ticker and pass its C channel to
 // this method.
-func (d *Dogstatsd) SendLoop(c <-chan time.Time, network, address string) {
-	d.WriteLoop(c, conn.NewDefaultManager(network, address, d.logger))
+func (d *Dogstatsd) SendLoop(ctx context.Context, c <-chan time.Time, network, address string) {
+	d.WriteLoop(ctx, c, conn.NewDefaultManager(network, address, d.logger))
 }
@@ -233,6 +240,19 @@ func (d *Dogstatsd) tagValues(labelValues []string) string {

 type observeFunc func(name string, lvs lv.LabelValues, value float64)

+// sampleObservations returns a modified observeFunc that samples observations.
+func sampleObservations(obs observeFunc, sampleRate float64) observeFunc {
+	if sampleRate >= 1 {
+		return obs
+	}
+	return func(name string, lvs lv.LabelValues, value float64) {
+		if rand.Float64() > sampleRate {
+			return
+		}
+		obs(name, lvs, value)
+	}
+}
+
 // Counter is a DogStatsD counter. Observations are forwarded to a Dogstatsd
 // object, and aggregated (summed) per timeseries.
 type Counter struct {
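To make the sampling behavior concrete, here is a small self-contained program built on the same idea (it mirrors sampleObservations but is not the vendored code itself):

package main

import (
	"fmt"
	"math/rand"
)

type observe func(value float64)

// sample forwards roughly rate*100% of observations and drops the rest.
func sample(obs observe, rate float64) observe {
	if rate >= 1 {
		return obs
	}
	return func(v float64) {
		if rand.Float64() > rate {
			return // dropped; DogStatsD scales counts back up via the sample-rate tag
		}
		obs(v)
	}
}

func main() {
	kept := 0
	obs := sample(func(float64) { kept++ }, 0.1)
	for i := 0; i < 10000; i++ {
		obs(1)
	}
	fmt.Println("kept:", kept) // roughly 1000 of 10000
}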


@@ -4,9 +4,10 @@
 package influx

 import (
+	"context"
 	"time"

-	influxdb "github.com/influxdata/influxdb/client/v2"
+	influxdb "github.com/influxdata/influxdb1-client/v2"

 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/metrics"
@@ -88,11 +89,16 @@ type BatchPointsWriter interface {
 // time the passed channel fires. This method blocks until the channel is
 // closed, so clients probably want to run it in its own goroutine. For typical
 // usage, create a time.Ticker and pass its C channel to this method.
-func (in *Influx) WriteLoop(c <-chan time.Time, w BatchPointsWriter) {
-	for range c {
-		if err := in.WriteTo(w); err != nil {
-			in.logger.Log("during", "WriteTo", "err", err)
+func (in *Influx) WriteLoop(ctx context.Context, c <-chan time.Time, w BatchPointsWriter) {
+	for {
+		select {
+		case <-c:
+			if err := in.WriteTo(w); err != nil {
+				in.logger.Log("during", "WriteTo", "err", err)
+			}
+		case <-ctx.Done():
+			return
 		}
 	}
 }


@@ -79,7 +79,7 @@ type pair struct{ label, value string }
 func (n *node) observe(lvs LabelValues, value float64) {
 	n.mtx.Lock()
 	defer n.mtx.Unlock()
-	if len(lvs) == 0 {
+	if len(lvs) <= 0 {
 		n.observations = append(n.observations, value)
 		return
 	}
@@ -101,7 +101,7 @@ func (n *node) observe(lvs LabelValues, value float64) {
 func (n *node) add(lvs LabelValues, delta float64) {
 	n.mtx.Lock()
 	defer n.mtx.Unlock()
-	if len(lvs) == 0 {
+	if len(lvs) <= 0 {
 		var value float64
 		if len(n.observations) > 0 {
 			value = last(n.observations) + delta


@@ -9,6 +9,7 @@
 package statsd

 import (
+	"context"
 	"fmt"
 	"io"
 	"time"
@@ -89,24 +90,29 @@ func (s *Statsd) NewTiming(name string, sampleRate float64) *Timing {
 }

 // WriteLoop is a helper method that invokes WriteTo to the passed writer every
-// time the passed channel fires. This method blocks until the channel is
-// closed, so clients probably want to run it in its own goroutine. For typical
+// time the passed channel fires. This method blocks until ctx is canceled,
+// so clients probably want to run it in its own goroutine. For typical
 // usage, create a time.Ticker and pass its C channel to this method.
-func (s *Statsd) WriteLoop(c <-chan time.Time, w io.Writer) {
-	for range c {
-		if _, err := s.WriteTo(w); err != nil {
-			s.logger.Log("during", "WriteTo", "err", err)
+func (s *Statsd) WriteLoop(ctx context.Context, c <-chan time.Time, w io.Writer) {
+	for {
+		select {
+		case <-c:
+			if _, err := s.WriteTo(w); err != nil {
+				s.logger.Log("during", "WriteTo", "err", err)
+			}
+		case <-ctx.Done():
+			return
 		}
 	}
 }

 // SendLoop is a helper method that wraps WriteLoop, passing a managed
 // connection to the network and address. Like WriteLoop, this method blocks
-// until the channel is closed, so clients probably want to start it in its own
+// until ctx is canceled, so clients probably want to start it in its own
 // goroutine. For typical usage, create a time.Ticker and pass its C channel to
 // this method.
-func (s *Statsd) SendLoop(c <-chan time.Time, network, address string) {
-	s.WriteLoop(c, conn.NewDefaultManager(network, address, s.logger))
+func (s *Statsd) SendLoop(ctx context.Context, c <-chan time.Time, network, address string) {
+	s.WriteLoop(ctx, c, conn.NewDefaultManager(network, address, s.logger))
 }

 // WriteTo flushes the buffered content of the metrics to the writer, in
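With the new signatures the caller owns shutdown through the context; a sketch of the typical wiring (s, a *Statsd, and w, an io.Writer, are assumed to already exist in scope):

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // canceling the context now stops the loop; previously one closed the channel

	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()

	go s.WriteLoop(ctx, ticker.C, w)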


@@ -2,6 +2,7 @@ package conn

 import (
 	"errors"
+	"math/rand"
 	"net"
 	"time"
@@ -103,7 +104,7 @@ func (m *Manager) loop() {
 		case conn = <-connc:
 			if conn == nil {
 				// didn't work
-				backoff = exponential(backoff) // wait longer
+				backoff = Exponential(backoff) // wait longer
 				reconnectc = m.after(backoff)  // try again
 			} else {
 				// worked!
@@ -132,12 +133,18 @@ func dial(d Dialer, network, address string, logger log.Logger) net.Conn {
 	return conn
 }

-func exponential(d time.Duration) time.Duration {
+// Exponential takes a duration and returns another one that is twice as long, +/- 50%. It is
+// used to provide backoff for operations that may fail and should avoid thundering herds.
+// See https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/ for rationale
+func Exponential(d time.Duration) time.Duration {
 	d *= 2
+	jitter := rand.Float64() + 0.5
+	d = time.Duration(int64(float64(d.Nanoseconds()) * jitter))
 	if d > time.Minute {
 		d = time.Minute
 	}
 	return d
 }

 // ErrConnectionUnavailable is returned by the Manager's Write method when the
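A quick illustration of the now-exported helper (output varies run to run because of the jitter):

	d := 100 * time.Millisecond
	for i := 0; i < 5; i++ {
		d = Exponential(d)
		fmt.Println(d) // roughly doubles each step, +/- 50%, capped at one minute
	}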


@@ -1,322 +0,0 @@
// Package stack implements utilities to capture, manipulate, and format call
// stacks. It provides a simpler API than package runtime.
//
// The implementation takes care of the minutia and special cases of
// interpreting the program counter (pc) values returned by runtime.Callers.
//
// Package stack's types implement fmt.Formatter, which provides a simple and
// flexible way to declaratively configure formatting when used with logging
// or error tracking packages.
package stack
import (
"bytes"
"errors"
"fmt"
"io"
"runtime"
"strconv"
"strings"
)
// Call records a single function invocation from a goroutine stack.
type Call struct {
fn *runtime.Func
pc uintptr
}
// Caller returns a Call from the stack of the current goroutine. The argument
// skip is the number of stack frames to ascend, with 0 identifying the
// calling function.
func Caller(skip int) Call {
var pcs [2]uintptr
n := runtime.Callers(skip+1, pcs[:])
var c Call
if n < 2 {
return c
}
c.pc = pcs[1]
if runtime.FuncForPC(pcs[0]).Name() != "runtime.sigpanic" {
c.pc--
}
c.fn = runtime.FuncForPC(c.pc)
return c
}
// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", c).
func (c Call) String() string {
return fmt.Sprint(c)
}
// MarshalText implements encoding.TextMarshaler. It formats the Call the same
// as fmt.Sprintf("%v", c).
func (c Call) MarshalText() ([]byte, error) {
if c.fn == nil {
return nil, ErrNoFunc
}
buf := bytes.Buffer{}
fmt.Fprint(&buf, c)
return buf.Bytes(), nil
}
// ErrNoFunc means that the Call has a nil *runtime.Func. The most likely
// cause is a Call with the zero value.
var ErrNoFunc = errors.New("no call stack information")
// Format implements fmt.Formatter with support for the following verbs.
//
// %s source file
// %d line number
// %n function name
// %v equivalent to %s:%d
//
// It accepts the '+' and '#' flags for most of the verbs as follows.
//
// %+s path of source file relative to the compile time GOPATH
// %#s full path of source file
// %+n import path qualified function name
// %+v equivalent to %+s:%d
// %#v equivalent to %#s:%d
func (c Call) Format(s fmt.State, verb rune) {
if c.fn == nil {
fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
return
}
switch verb {
case 's', 'v':
file, line := c.fn.FileLine(c.pc)
switch {
case s.Flag('#'):
// done
case s.Flag('+'):
file = file[pkgIndex(file, c.fn.Name()):]
default:
const sep = "/"
if i := strings.LastIndex(file, sep); i != -1 {
file = file[i+len(sep):]
}
}
io.WriteString(s, file)
if verb == 'v' {
buf := [7]byte{':'}
s.Write(strconv.AppendInt(buf[:1], int64(line), 10))
}
case 'd':
_, line := c.fn.FileLine(c.pc)
buf := [6]byte{}
s.Write(strconv.AppendInt(buf[:0], int64(line), 10))
case 'n':
name := c.fn.Name()
if !s.Flag('+') {
const pathSep = "/"
if i := strings.LastIndex(name, pathSep); i != -1 {
name = name[i+len(pathSep):]
}
const pkgSep = "."
if i := strings.Index(name, pkgSep); i != -1 {
name = name[i+len(pkgSep):]
}
}
io.WriteString(s, name)
}
}
// PC returns the program counter for this call frame; multiple frames may
// have the same PC value.
func (c Call) PC() uintptr {
return c.pc
}
// name returns the import path qualified name of the function containing the
// call.
func (c Call) name() string {
if c.fn == nil {
return "???"
}
return c.fn.Name()
}
func (c Call) file() string {
if c.fn == nil {
return "???"
}
file, _ := c.fn.FileLine(c.pc)
return file
}
func (c Call) line() int {
if c.fn == nil {
return 0
}
_, line := c.fn.FileLine(c.pc)
return line
}
// CallStack records a sequence of function invocations from a goroutine
// stack.
type CallStack []Call
// String implements fmt.Stinger. It is equivalent to fmt.Sprintf("%v", cs).
func (cs CallStack) String() string {
return fmt.Sprint(cs)
}
var (
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
spaceBytes = []byte(" ")
)
// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
// same as fmt.Sprintf("%v", cs).
func (cs CallStack) MarshalText() ([]byte, error) {
buf := bytes.Buffer{}
buf.Write(openBracketBytes)
for i, pc := range cs {
if pc.fn == nil {
return nil, ErrNoFunc
}
if i > 0 {
buf.Write(spaceBytes)
}
fmt.Fprint(&buf, pc)
}
buf.Write(closeBracketBytes)
return buf.Bytes(), nil
}
// Format implements fmt.Formatter by printing the CallStack as square brackets
// ([, ]) surrounding a space separated list of Calls each formatted with the
// supplied verb and options.
func (cs CallStack) Format(s fmt.State, verb rune) {
s.Write(openBracketBytes)
for i, pc := range cs {
if i > 0 {
s.Write(spaceBytes)
}
pc.Format(s, verb)
}
s.Write(closeBracketBytes)
}
// Trace returns a CallStack for the current goroutine with element 0
// identifying the calling function.
func Trace() CallStack {
var pcs [512]uintptr
n := runtime.Callers(2, pcs[:])
cs := make([]Call, n)
for i, pc := range pcs[:n] {
pcFix := pc
if i > 0 && cs[i-1].fn.Name() != "runtime.sigpanic" {
pcFix--
}
cs[i] = Call{
fn: runtime.FuncForPC(pcFix),
pc: pcFix,
}
}
return cs
}
// TrimBelow returns a slice of the CallStack with all entries below c
// removed.
func (cs CallStack) TrimBelow(c Call) CallStack {
for len(cs) > 0 && cs[0].pc != c.pc {
cs = cs[1:]
}
return cs
}
// TrimAbove returns a slice of the CallStack with all entries above c
// removed.
func (cs CallStack) TrimAbove(c Call) CallStack {
for len(cs) > 0 && cs[len(cs)-1].pc != c.pc {
cs = cs[:len(cs)-1]
}
return cs
}
// pkgIndex returns the index that results in file[index:] being the path of
// file relative to the compile time GOPATH, and file[:index] being the
// $GOPATH/src/ portion of file. funcName must be the name of a function in
// file as returned by runtime.Func.Name.
func pkgIndex(file, funcName string) int {
// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
// at runtime, but we can infer the number of path segments in the GOPATH.
// We note that runtime.Func.Name() returns the function name qualified by
// the import path, which does not include the GOPATH. Thus we can trim
// segments from the beginning of the file path until the number of path
// separators remaining is one more than the number of path separators in
// the function name. For example, given:
//
// GOPATH /home/user
// file /home/user/src/pkg/sub/file.go
// fn.Name() pkg/sub.Type.Method
//
// We want to produce:
//
// file[:idx] == /home/user/src/
// file[idx:] == pkg/sub/file.go
//
// From this we can easily see that fn.Name() has one less path separator
// than our desired result for file[idx:]. We count separators from the
// end of the file path until it finds two more than in the function name
// and then move one character forward to preserve the initial path
// segment without a leading separator.
const sep = "/"
i := len(file)
for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
i = strings.LastIndex(file[:i], sep)
if i == -1 {
i = -len(sep)
break
}
}
// get back to 0 or trim the leading separator
return i + len(sep)
}
var runtimePath string
func init() {
var pcs [1]uintptr
runtime.Callers(0, pcs[:])
fn := runtime.FuncForPC(pcs[0])
file, _ := fn.FileLine(pcs[0])
idx := pkgIndex(file, fn.Name())
runtimePath = file[:idx]
if runtime.GOOS == "windows" {
runtimePath = strings.ToLower(runtimePath)
}
}
func inGoroot(c Call) bool {
file := c.file()
if len(file) == 0 || file[0] == '?' {
return true
}
if runtime.GOOS == "windows" {
file = strings.ToLower(file)
}
return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
}
// TrimRuntime returns a slice of the CallStack with the topmost entries from
// the go runtime removed. It considers any calls originating from unknown
// files, files under GOROOT, or _testmain.go as part of the runtime.
func (cs CallStack) TrimRuntime() CallStack {
for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
cs = cs[:len(cs)-1]
}
return cs
}


@@ -1,20 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013-2016 Errplane Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -1,25 +0,0 @@
# List
- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE)
- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE)
- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING)
- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license)
- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE)
- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt)
- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE)
- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE)
- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)
- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE)
- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE)
- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE)
- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE)
- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE)
- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt)
- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE)
- github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE)
- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING)
- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE)
- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE)
- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt)
- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt)
- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE)
- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt)


@@ -1,48 +0,0 @@
package models
import (
"errors"
"strings"
)
// ConsistencyLevel represent a required replication criteria before a write can
// be returned as successful.
//
// The consistency level is handled in open-source InfluxDB but only applicable to clusters.
type ConsistencyLevel int
const (
// ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet.
ConsistencyLevelAny ConsistencyLevel = iota
// ConsistencyLevelOne requires at least one data node acknowledged a write.
ConsistencyLevelOne
// ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write.
ConsistencyLevelQuorum
// ConsistencyLevelAll requires all data nodes to acknowledge a write.
ConsistencyLevelAll
)
var (
// ErrInvalidConsistencyLevel is returned when parsing the string version
// of a consistency level.
ErrInvalidConsistencyLevel = errors.New("invalid consistency level")
)
// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const.
func ParseConsistencyLevel(level string) (ConsistencyLevel, error) {
switch strings.ToLower(level) {
case "any":
return ConsistencyLevelAny, nil
case "one":
return ConsistencyLevelOne, nil
case "quorum":
return ConsistencyLevelQuorum, nil
case "all":
return ConsistencyLevelAll, nil
default:
return 0, ErrInvalidConsistencyLevel
}
}


@@ -1,6 +1,6 @@
-The MIT License (MIT)
+MIT License

-Copyright (c) 2014 Chris Hines
+Copyright (c) 2019 InfluxData

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal


@@ -1,4 +1,4 @@
-package models // import "github.com/influxdata/influxdb/models"
+package models // import "github.com/influxdata/influxdb1-client/models"

 // from stdlib hash/fnv/fnv.go
 const (


@@ -1,4 +1,4 @@
-package models // import "github.com/influxdata/influxdb/models"
+package models // import "github.com/influxdata/influxdb1-client/models"

 import (
 	"reflect"
@@ -12,6 +12,12 @@ func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) {
 	return strconv.ParseInt(s, base, bitSize)
 }

+// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint.
+func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) {
+	s := unsafeBytesToString(b)
+	return strconv.ParseUint(s, base, bitSize)
+}
+
 // parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat.
 func parseFloatBytes(b []byte, bitSize int) (float64, error) {
 	s := unsafeBytesToString(b)
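unsafeBytesToString is defined elsewhere in this package; the conventional zero-copy implementation looks like the following (shown as an assumption, only to explain why these wrappers avoid allocating):

	func unsafeBytesToString(in []byte) string {
		return *(*string)(unsafe.Pointer(&in))
	}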


@@ -1,5 +1,5 @@
 // Package models implements basic objects used throughout the TICK stack.
-package models // import "github.com/influxdata/influxdb/models"
+package models // import "github.com/influxdata/influxdb1-client/models"

 import (
 	"bytes"
@@ -12,20 +12,27 @@ import (
 	"strconv"
 	"strings"
 	"time"
+	"unicode"
+	"unicode/utf8"

-	"github.com/influxdata/influxdb/pkg/escape"
+	"github.com/influxdata/influxdb1-client/pkg/escape"
 )

+type escapeSet struct {
+	k   [1]byte
+	esc [2]byte
+}
+
 var (
-	measurementEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
-	}
-
-	tagEscapeCodes = map[byte][]byte{
-		',': []byte(`\,`),
-		' ': []byte(`\ `),
-		'=': []byte(`\=`),
-	}
+	measurementEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
+	}
+
+	tagEscapeCodes = [...]escapeSet{
+		{k: [1]byte{','}, esc: [2]byte{'\\', ','}},
+		{k: [1]byte{' '}, esc: [2]byte{'\\', ' '}},
+		{k: [1]byte{'='}, esc: [2]byte{'\\', '='}},
+	}
 )

 // ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
@@ -43,6 +50,16 @@ const (
 	MaxKeyLength = 65535
 )

+// enableUint64Support will enable uint64 support if set to true.
+var enableUint64Support = false
+
+// EnableUintSupport manually enables uint support for the point parser.
+// This function will be removed in the future and only exists for unit tests during the
+// transition.
+func EnableUintSupport() {
+	enableUint64Support = true
+}
+
 // Point defines the values that will be written to the database.
 type Point interface {
 	// Name return the measurement name for the point.
@@ -54,6 +71,9 @@ type Point interface {
 	// Tags returns the tag set for the point.
 	Tags() Tags

+	// ForEachTag iterates over each tag invoking fn. If fn return false, iteration stops.
+	ForEachTag(fn func(k, v []byte) bool)
+
 	// AddTag adds or replaces a tag value for a point.
 	AddTag(key, value string)
@@ -137,6 +157,9 @@ const (
 	// Empty is used to indicate that there is no field.
 	Empty

+	// Unsigned indicates the field's type is an unsigned integer.
+	Unsigned
 )

 // FieldIterator provides a low-allocation interface to iterate through a point's fields.
@@ -156,6 +179,9 @@ type FieldIterator interface {
 	// IntegerValue returns the integer value of the current field.
 	IntegerValue() (int64, error)

+	// UnsignedValue returns the unsigned value of the current field.
+	UnsignedValue() (uint64, error)
+
 	// BooleanValue returns the boolean value of the current field.
 	BooleanValue() (bool, error)
@@ -205,6 +231,12 @@ type point struct {
 	it fieldIterator
 }

+// type assertions
+var (
+	_ Point         = (*point)(nil)
+	_ FieldIterator = (*point)(nil)
+)
+
 const (
 	// the number of characters for the largest possible int64 (9223372036854775807)
 	maxInt64Digits = 19
@@ -212,6 +244,9 @@ const (
 	// the number of characters for the smallest possible int64 (-9223372036854775808)
 	minInt64Digits = 20

+	// the number of characters for the largest possible uint64 (18446744073709551615)
+	maxUint64Digits = 20
+
 	// the number of characters required for the largest float64 before a range check
 	// would occur during parsing
 	maxFloat64Digits = 25
@@ -238,31 +273,46 @@ func ParsePointsString(buf string) ([]Point, error) {
 // NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
 // This can have the unintended effect preventing buf from being garbage collected.
 func ParseKey(buf []byte) (string, Tags) {
+	name, tags := ParseKeyBytes(buf)
+	return string(name), tags
+}
+
+func ParseKeyBytes(buf []byte) ([]byte, Tags) {
+	return ParseKeyBytesWithTags(buf, nil)
+}
+
+func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) {
 	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
 	// when just parsing a key
 	state, i, _ := scanMeasurement(buf, 0)

-	var tags Tags
+	var name []byte
 	if state == tagKeyState {
-		tags = parseTags(buf)
+		tags = parseTags(buf, tags)
 		// scanMeasurement returns the location of the comma if there are tags, strip that off
-		return string(buf[:i-1]), tags
+		name = buf[:i-1]
+	} else {
+		name = buf[:i]
 	}
-	return string(buf[:i]), tags
+
+	return unescapeMeasurement(name), tags
 }

-func ParseTags(buf []byte) (Tags, error) {
-	return parseTags(buf), nil
+func ParseTags(buf []byte) Tags {
+	return parseTags(buf, nil)
 }

-func ParseName(buf []byte) ([]byte, error) {
+func ParseName(buf []byte) []byte {
 	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
 	// when just parsing a key
 	state, i, _ := scanMeasurement(buf, 0)
+
+	var name []byte
 	if state == tagKeyState {
-		return buf[:i-1], nil
+		name = buf[:i-1]
+	} else {
+		name = buf[:i]
 	}
-	return buf[:i], nil
+
+	return unescapeMeasurement(name)
 }

 // ParsePointsWithPrecision is similar to ParsePoints, but allows the
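With the reworked API, parsing a series key now also unescapes the measurement; for example (the key here is invented for illustration):

	name, tags := ParseKey([]byte(`disk\ usage,host=serverA`))
	// name == "disk usage": the measurement escape is removed by unescapeMeasurement
	// tags carries host=serverA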
@@ -285,7 +335,6 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin
 			continue
 		}

-		// lines which start with '#' are comments
 		start := skipWhitespace(block, 0)

 		// If line is all whitespace, just skip it
@@ -293,6 +342,7 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin
 			continue
 		}

+		// lines which start with '#' are comments
 		if block[start] == '#' {
 			continue
 		}
@@ -318,7 +368,7 @@ func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision strin
 }

 func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
-	// scan the first block which is measurement[,tag1=value1,tag2=value=2...]
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
 	pos, key, err := scanKey(buf, 0)
 	if err != nil {
 		return nil, err
@@ -345,7 +395,7 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err
 	}

 	var maxKeyErr error
-	walkFields(fields, func(k, v []byte) bool {
+	err = walkFields(fields, func(k, v []byte) bool {
 		if sz := seriesKeySize(key, k); sz > MaxKeyLength {
 			maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
 			return false
@@ -353,6 +403,10 @@ func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, err
 		return true
 	})

+	if err != nil {
+		return nil, err
+	}
+
 	if maxKeyErr != nil {
 		return nil, maxKeyErr
 	}
@@ -815,7 +869,7 @@ func isNumeric(b byte) bool {
 // error if a invalid number is scanned.
 func scanNumber(buf []byte, i int) (int, error) {
 	start := i
-	var isInt bool
+	var isInt, isUnsigned bool

 	// Is negative number?
 	if i < len(buf) && buf[i] == '-' {
@@ -841,10 +895,14 @@ func scanNumber(buf []byte, i int) (int, error) {
 			break
 		}

-		if buf[i] == 'i' && i > start && !isInt {
+		if buf[i] == 'i' && i > start && !(isInt || isUnsigned) {
 			isInt = true
 			i++
 			continue
+		} else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) {
+			isUnsigned = true
+			i++
+			continue
 		}

 		if buf[i] == '.' {
@@ -879,7 +937,7 @@ func scanNumber(buf []byte, i int) (int, error) {
 		i++
 	}

-	if isInt && (decimal || scientific) {
+	if (isInt || isUnsigned) && (decimal || scientific) {
 		return i, ErrInvalidNumber
 	}
@@ -914,6 +972,26 @@ func scanNumber(buf []byte, i int) (int, error) {
 				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
 			}
 		}
+	} else if isUnsigned {
+		// Return an error if uint64 support has not been enabled.
+		if !enableUint64Support {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the last char is a 'u' for unsigned
+		if buf[i-1] != 'u' {
+			return i, ErrInvalidNumber
+		}
+		// Make sure the first char is not a '-' for unsigned
+		if buf[start] == '-' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the uint to check bounds the number of digits could be larger than the max range
+		// We subtract 1 from the index to remove the `u` from our tests
+		if len(buf[start:i-1]) >= maxUint64Digits {
+			if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err)
+			}
+		}
 	} else {
 		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
 		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
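Putting the unsigned support together, a sketch of parsing a point that uses the new u suffix (EnableUintSupport is required, as the guard above shows):

	EnableUintSupport()
	pts, err := ParsePointsString(`m,host=a free=42u 1562000000000000000`)
	// err == nil and pts[0] carries an unsigned field; without EnableUintSupport,
	// scanNumber rejects "42u" as an invalid number.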
@@ -1015,7 +1093,7 @@ func scanLine(buf []byte, i int) (int, []byte) {
 	}

 	// skip past escaped characters
-	if buf[i] == '\\' {
+	if buf[i] == '\\' && i+2 < len(buf) {
 		i += 2
 		continue
 	}
@@ -1144,24 +1222,34 @@ func scanFieldValue(buf []byte, i int) (int, []byte) {
 	return i, buf[start:i]
 }

-func escapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, []byte{b}, esc, -1)
+func EscapeMeasurement(in []byte) []byte {
+	for _, c := range measurementEscapeCodes {
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
+		}
 	}
 	return in
 }

 func unescapeMeasurement(in []byte) []byte {
-	for b, esc := range measurementEscapeCodes {
-		in = bytes.Replace(in, esc, []byte{b}, -1)
+	if bytes.IndexByte(in, '\\') == -1 {
+		return in
+	}
+
+	for i := range measurementEscapeCodes {
+		c := &measurementEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
+		}
 	}
 	return in
 }

 func escapeTag(in []byte) []byte {
-	for b, esc := range tagEscapeCodes {
-		if bytes.IndexByte(in, b) != -1 {
-			in = bytes.Replace(in, []byte{b}, esc, -1)
+	for i := range tagEscapeCodes {
+		c := &tagEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
 		}
 	}
 	return in
@@ -1172,9 +1260,10 @@ func unescapeTag(in []byte) []byte {
 		return in
 	}

-	for b, esc := range tagEscapeCodes {
-		if bytes.IndexByte(in, b) != -1 {
-			in = bytes.Replace(in, esc, []byte{b}, -1)
+	for i := range tagEscapeCodes {
+		c := &tagEscapeCodes[i]
+		if bytes.IndexByte(in, c.k[0]) != -1 {
+			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
 		}
 	}
 	return in
@ -1226,7 +1315,8 @@ func unescapeStringField(in string) string {
}

// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
-// an unsupported field value (NaN) or out of range time is passed, this function returns an error.
// an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function
// returns an error.
func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
    key, err := pointKey(name, tags, fields, t)
    if err != nil {
@ -1257,11 +1347,17 @@ func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) {
    switch value := value.(type) {
    case float64:
        // Ensure the caller validates and handles invalid field values
        if math.IsInf(value, 0) {
            return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
        }
        if math.IsNaN(value) {
            return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
        }
    case float32:
        // Ensure the caller validates and handles invalid field values
        if math.IsInf(float64(value), 0) {
            return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key)
        }
        if math.IsNaN(float64(value)) {
            return nil, fmt.Errorf("NaN is an unsupported value for field %s", key)
        }
@ -1315,6 +1411,11 @@ func NewPointFromBytes(b []byte) (Point, error) {
            if err != nil {
                return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
            }
        case Unsigned:
            _, err := iter.UnsignedValue()
            if err != nil {
                return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
            }
        case String:
            // Skip since this won't return an error
        case Boolean:
@ -1382,10 +1483,14 @@ func (p *point) Tags() Tags {
    if p.cachedTags != nil {
        return p.cachedTags
    }
-   p.cachedTags = parseTags(p.key)
    p.cachedTags = parseTags(p.key, nil)
    return p.cachedTags
}

func (p *point) ForEachTag(fn func(k, v []byte) bool) {
    walkTags(p.key, fn)
}

func (p *point) HasTag(tag []byte) bool {
    if len(p.key) == 0 {
        return false
@ -1445,11 +1550,14 @@ func walkTags(buf []byte, fn func(key, value []byte) bool) {
// walkFields walks each field key and value via fn. If fn returns false, the iteration
// is stopped. The values are the raw byte slices and not the converted types.
-func walkFields(buf []byte, fn func(key, value []byte) bool) {
func walkFields(buf []byte, fn func(key, value []byte) bool) error {
    var i int
    var key, val []byte

    for len(buf) > 0 {
        i, key = scanTo(buf, 0, '=')
        if i > len(buf)-2 {
            return fmt.Errorf("invalid value: field-key=%s", key)
        }
        buf = buf[i+1:]
        i, val = scanFieldValue(buf, 0)
        buf = buf[i:]
@ -1462,26 +1570,52 @@ func walkFields(buf []byte, fn func(key, value []byte) bool) {
            buf = buf[1:]
        }
    }
    return nil
}
-func parseTags(buf []byte) Tags {
// parseTags parses buf into the provided destination tags, returning destination
// Tags, which may have a different length and capacity.
func parseTags(buf []byte, dst Tags) Tags {
    if len(buf) == 0 {
        return nil
    }

-   tags := make(Tags, 0, bytes.Count(buf, []byte(",")))
    n := bytes.Count(buf, []byte(","))
    if cap(dst) < n {
        dst = make(Tags, n)
    } else {
        dst = dst[:n]
    }

    // Ensure existing behaviour when point has no tags and nil slice passed in.
    if dst == nil {
        dst = Tags{}
    }

    // Series keys can contain escaped commas, therefore the number of commas
    // in a series key only gives an estimation of the upper bound on the number
    // of tags.
    var i int
    walkTags(buf, func(key, value []byte) bool {
-       tags = append(tags, NewTag(key, value))
        dst[i].Key, dst[i].Value = key, value
        i++
        return true
    })
-   return tags
    return dst[:i]
}
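The dst parameter exists so hot paths can recycle one backing array across many series keys. A generic sketch of the same reuse pattern (an illustrative stand-in, not this package's API):

package main

import (
    "bytes"
    "fmt"
)

// parseFields is a stand-in for parseTags: it reslices dst when capacity
// suffices and allocates only when it must, so a caller processing many
// keys can recycle one backing array.
func parseFields(buf []byte, dst [][]byte) [][]byte {
    dst = dst[:0]
    for len(buf) > 0 {
        i := bytes.IndexByte(buf, ',')
        if i < 0 {
            return append(dst, buf)
        }
        dst = append(dst, buf[:i])
        buf = buf[i+1:]
    }
    return dst
}

func main() {
    var scratch [][]byte
    for _, key := range []string{"host=a,region=b", "host=c"} {
        scratch = parseFields([]byte(key), scratch) // no per-key allocation after warm-up
        fmt.Printf("%q\n", scratch)
    }
}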
// MakeKey creates a key for a set of tags.
func MakeKey(name []byte, tags Tags) []byte {
    return AppendMakeKey(nil, name, tags)
}

// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer.
func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte {
    // unescape the name and then re-escape it to avoid double escaping.
    // The key should always be stored in escaped form.
-   return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...)
    dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...)
    dst = tags.AppendHashKey(dst)
    return dst
}
// SetTags replaces the tags for the point.
@ -1630,10 +1764,7 @@ func (p *point) UnmarshalBinary(b []byte) error {
    p.fields, b = b[:n], b[n:]

    // Read timestamp.
-   if err := p.time.UnmarshalBinary(b); err != nil {
-       return err
-   }
-   return nil
    return p.time.UnmarshalBinary(b)
}
// PrecisionString returns a string representation of the point. If there
@ -1678,6 +1809,12 @@ func (p *point) unmarshalBinary() (Fields, error) {
            return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
        }
        fields[string(iter.FieldKey())] = v
    case Unsigned:
        v, err := iter.UnsignedValue()
        if err != nil {
            return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err)
        }
        fields[string(iter.FieldKey())] = v
    case String:
        fields[string(iter.FieldKey())] = iter.StringValue()
    case Boolean:
@ -1708,7 +1845,7 @@ func (p *point) UnixNano() int64 {
// string representations are no longer than size. Points with a single field or
// a point without a timestamp may exceed the requested size.
func (p *point) Split(size int) []Point {
-   if p.time.IsZero() || len(p.String()) <= size {
    if p.time.IsZero() || p.StringSize() <= size {
        return []Point{p}
    }
@ -1803,6 +1940,82 @@ func NewTags(m map[string]string) Tags {
    return a
}

// HashKey hashes all of a tag's keys.
func (a Tags) HashKey() []byte {
    return a.AppendHashKey(nil)
}

func (a Tags) needsEscape() bool {
    for i := range a {
        t := &a[i]
        for j := range tagEscapeCodes {
            c := &tagEscapeCodes[j]
            if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 {
                return true
            }
        }
    }
    return false
}

// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer.
func (a Tags) AppendHashKey(dst []byte) []byte {
    // Empty maps marshal to empty bytes.
    if len(a) == 0 {
        return dst
    }

    // Type invariant: Tags are sorted
    sz := 0
    var escaped Tags
    if a.needsEscape() {
        var tmp [20]Tag
        if len(a) < len(tmp) {
            escaped = tmp[:len(a)]
        } else {
            escaped = make(Tags, len(a))
        }

        for i := range a {
            t := &a[i]
            nt := &escaped[i]
            nt.Key = escapeTag(t.Key)
            nt.Value = escapeTag(t.Value)
            sz += len(nt.Key) + len(nt.Value)
        }
    } else {
        sz = a.Size()
        escaped = a
    }

    sz += len(escaped) + (len(escaped) * 2) // separators

    // Generate marshaled bytes.
    if cap(dst)-len(dst) < sz {
        nd := make([]byte, len(dst), len(dst)+sz)
        copy(nd, dst)
        dst = nd
    }
    buf := dst[len(dst) : len(dst)+sz]
    idx := 0
    for i := range escaped {
        k := &escaped[i]
        if len(k.Value) == 0 {
            continue
        }
        buf[idx] = ','
        idx++
        copy(buf[idx:], k.Key)
        idx += len(k.Key)
        buf[idx] = '='
        idx++
        copy(buf[idx:], k.Value)
        idx += len(k.Value)
    }
    return dst[:len(dst)+idx]
}
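A short usage sketch of the exported surface above (values illustrative; the output comments show what the escaping rules imply):

package main

import (
    "fmt"

    "github.com/influxdata/influxdb1-client/models"
)

func main() {
    tags := models.NewTags(map[string]string{"host": "server 01", "region": "us-west"})
    // HashKey escapes and serializes the sorted tag set; note the escaped space.
    fmt.Printf("%s\n", tags.HashKey()) // ,host=server\ 01,region=us-west
    // AppendMakeKey/MakeKey prepend the (re-)escaped measurement name.
    key := models.MakeKey([]byte("cpu"), tags)
    fmt.Printf("%s\n", key) // cpu,host=server\ 01,region=us-west
}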
// String returns the string representation of the tags.
func (a Tags) String() string {
    var buf bytes.Buffer
@ -1822,8 +2035,8 @@ func (a Tags) String() string {
// for data structures or delimiters for example.
func (a Tags) Size() int {
    var total int
-   for _, t := range a {
-       total += t.Size()
    for i := range a {
        total += a[i].Size()
    }
    return total
}
@ -1919,18 +2132,6 @@ func (a *Tags) SetString(key, value string) {
    a.Set([]byte(key), []byte(value))
}

-// Delete removes a tag by key.
-func (a *Tags) Delete(key []byte) {
-   for i, t := range *a {
-       if bytes.Equal(t.Key, key) {
-           copy((*a)[i:], (*a)[i+1:])
-           (*a)[len(*a)-1] = Tag{}
-           *a = (*a)[:len(*a)-1]
-           return
-       }
-   }
-}
-
// Map returns a map representation of the tags.
func (a Tags) Map() map[string]string {
    m := make(map[string]string, len(a))
@ -1940,60 +2141,6 @@ func (a Tags) Map() map[string]string {
    return m
}

-// Merge merges the tags combining the two. If both define a tag with the
-// same key, the merged value overwrites the old value.
-// A new map is returned.
-func (a Tags) Merge(other map[string]string) Tags {
-   merged := make(map[string]string, len(a)+len(other))
-   for _, t := range a {
-       merged[string(t.Key)] = string(t.Value)
-   }
-   for k, v := range other {
-       merged[k] = v
-   }
-   return NewTags(merged)
-}
-
-// HashKey hashes all of a tag's keys.
-func (a Tags) HashKey() []byte {
-   // Empty maps marshal to empty bytes.
-   if len(a) == 0 {
-       return nil
-   }
-
-   // Type invariant: Tags are sorted
-   escaped := make(Tags, 0, len(a))
-   sz := 0
-   for _, t := range a {
-       ek := escapeTag(t.Key)
-       ev := escapeTag(t.Value)
-
-       if len(ev) > 0 {
-           escaped = append(escaped, Tag{Key: ek, Value: ev})
-           sz += len(ek) + len(ev)
-       }
-   }
-
-   sz += len(escaped) + (len(escaped) * 2) // separators
-
-   // Generate marshaled bytes.
-   b := make([]byte, sz)
-   buf := b
-   idx := 0
-   for _, k := range escaped {
-       buf[idx] = ','
-       idx++
-       copy(buf[idx:idx+len(k.Key)], k.Key)
-       idx += len(k.Key)
-       buf[idx] = '='
-       idx++
-       copy(buf[idx:idx+len(k.Value)], k.Value)
-       idx += len(k.Value)
-   }
-   return b[:idx]
-}
-
// CopyTags returns a shallow copy of tags.
func CopyTags(a Tags) Tags {
    other := make(Tags, len(a))
@ -2071,10 +2218,13 @@ func (p *point) Next() bool {
        return true
    }

-   if strings.IndexByte(`0123456789-.nNiI`, c) >= 0 {
    if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 {
        if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
            p.it.fieldType = Integer
            p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
        } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' {
            p.it.fieldType = Unsigned
            p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
        } else {
            p.it.fieldType = Float
        }
@ -2110,6 +2260,15 @@ func (p *point) IntegerValue() (int64, error) {
    return n, nil
}

// UnsignedValue returns the unsigned value of the current field.
func (p *point) UnsignedValue() (uint64, error) {
    n, err := parseUintBytes(p.it.valueBuf, 10, 64)
    if err != nil {
        return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err)
    }
    return n, nil
}
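Putting the iterator together with the new field type, a hedged end-to-end sketch (measurement and values illustrative):

package main

import (
    "fmt"
    "log"

    "github.com/influxdata/influxdb1-client/models"
)

func main() {
    models.EnableUintSupport() // required for the 'u' suffix to parse
    pts, err := models.ParsePointsString("disk,host=a free=42u,used=10i")
    if err != nil {
        log.Fatal(err)
    }
    iter := pts[0].FieldIterator()
    for iter.Next() {
        if iter.Type() == models.Unsigned {
            v, err := iter.UnsignedValue()
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s = %d\n", iter.FieldKey(), v)
        }
    }
}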
// BooleanValue returns the boolean value of the current field.
func (p *point) BooleanValue() (bool, error) {
    b, err := parseBoolBytes(p.it.valueBuf)
@ -2192,6 +2351,9 @@ func appendField(b []byte, k string, v interface{}) []byte {
    case int:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case uint64:
        b = strconv.AppendUint(b, v, 10)
        b = append(b, 'u')
    case uint32:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
@ -2201,10 +2363,9 @@ func appendField(b []byte, k string, v interface{}) []byte {
    case uint8:
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
-   // TODO: 'uint' should be considered just as "dangerous" as a uint64,
-   // perhaps the value should be checked and capped at MaxInt64? We could
-   // then include uint64 as an accepted value
    case uint:
        // TODO: 'uint' should be converted to writing as an unsigned integer,
        // but we cannot since that would break backwards compatibility.
        b = strconv.AppendInt(b, int64(v), 10)
        b = append(b, 'i')
    case float32:
@ -2224,8 +2385,29 @@ func appendField(b []byte, k string, v interface{}) []byte {
    return b
}

-type byteSlices [][]byte
-
-func (a byteSlices) Len() int           { return len(a) }
-func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
-func (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// ValidKeyToken returns true if the token used for measurement, tag key, or tag
// value is a valid unicode string and only contains printable, non-replacement characters.
func ValidKeyToken(s string) bool {
    if !utf8.ValidString(s) {
        return false
    }
    for _, r := range s {
        if !unicode.IsPrint(r) || r == unicode.ReplacementChar {
            return false
        }
    }
    return true
}

// ValidKeyTokens returns true if the measurement name and all tags are valid.
func ValidKeyTokens(name string, tags Tags) bool {
    if !ValidKeyToken(name) {
        return false
    }
    for _, tag := range tags {
        if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) {
            return false
        }
    }
    return true
}
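Usage sketch (assuming the models import of this package): rejecting a series key that carries a non-printable rune before it reaches a write path.

name := "cpu"
tags := models.NewTags(map[string]string{"host": "srv\x00"})
if !models.ValidKeyTokens(name, tags) {
    // "\x00" is not printable, so this series key should be rejected.
}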

View file

@ -0,0 +1,7 @@
// +build uint uint64

package models

func init() {
    EnableUintSupport()
}
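A note on the build tags above: compiling with `go build -tags uint` (or `-tags uint64`) links in this init and enables uint64 field support process-wide. A program built without the tag can opt in explicitly; a minimal sketch:

package main

import "github.com/influxdata/influxdb1-client/models"

func main() {
    models.EnableUintSupport() // same effect as building with -tags uint64
    // ... points with fields like `free=42u` will now parse ...
}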

View file

@ -1,6 +1,6 @@
// Package escape contains utilities for escaping parts of InfluxQL
// and InfluxDB line protocol.
-package escape // import "github.com/influxdata/influxdb/pkg/escape"
package escape // import "github.com/influxdata/influxdb1-client/pkg/escape"

import (
    "bytes"
@ -78,7 +78,11 @@ func Unescape(in []byte) []byte {
    i := 0
    inLen := len(in)
-   var out []byte

    // The output size will be no more than inLen. Preallocating the
    // capacity of the output is faster and uses less memory than
    // letting append() do its own (over)allocation.
    out := make([]byte, 0, inLen)

    for {
        if i >= inLen {

View file

@ -1,5 +1,5 @@
// Package client (v2) is the current official Go client for InfluxDB.
-package client // import "github.com/influxdata/influxdb/client/v2"
package client // import "github.com/influxdata/influxdb1-client/v2"

import (
    "bytes"
@ -9,13 +9,15 @@ import (
    "fmt"
    "io"
    "io/ioutil"
    "mime"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "strings"
    "time"

-   "github.com/influxdata/influxdb/models"
    "github.com/influxdata/influxdb1-client/models"
)
// HTTPConfig is the config data needed to create an HTTP Client.
@ -43,6 +45,9 @@ type HTTPConfig struct {
    // TLSConfig allows the user to set their own TLS config for the HTTP
    // Client. If set, this option overrides InsecureSkipVerify.
    TLSConfig *tls.Config

    // Proxy configures the Proxy function on the HTTP client.
    Proxy func(req *http.Request) (*url.URL, error)
}
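Configuration sketch (endpoint illustrative): honoring HTTP_PROXY/HTTPS_PROXY by wiring the standard library's proxy resolver into the new field:

c, err := client.NewHTTPClient(client.HTTPConfig{
    Addr:  "http://localhost:8086",
    Proxy: http.ProxyFromEnvironment,
})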
// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct.
@ -73,6 +78,10 @@ type Client interface {
    // the UDP client.
    Query(q Query) (*Response, error)

    // QueryAsChunk makes an InfluxDB Query on the database. This will fail if using
    // the UDP client.
    QueryAsChunk(q Query) (*ChunkedResponse, error)

    // Close releases any resources a Client may be using.
    Close() error
}
@ -97,6 +106,7 @@ func NewHTTPClient(conf HTTPConfig) (Client, error) {
        TLSClientConfig: &tls.Config{
            InsecureSkipVerify: conf.InsecureSkipVerify,
        },
        Proxy: conf.Proxy,
    }
    if conf.TLSConfig != nil {
        tr.TLSClientConfig = conf.TLSConfig
@ -118,8 +128,9 @@ func NewHTTPClient(conf HTTPConfig) (Client, error) {
// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred.
func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
    now := time.Now()

    u := c.url
-   u.Path = "ping"
    u.Path = path.Join(u.Path, "ping")

    req, err := http.NewRequest("GET", u.String(), nil)
    if err != nil {
@ -150,7 +161,7 @@ func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) {
    }

    if resp.StatusCode != http.StatusNoContent {
-       var err = fmt.Errorf(string(body))
        var err = errors.New(string(body))
        return 0, "", err
    }
@ -168,7 +179,7 @@ func (c *client) Close() error {
// once the client is instantiated.
type client struct {
    // N.B - if url.UserInfo is accessed in future modifications to the
-   // methods on client, you will need to syncronise access to url.
    // methods on client, you will need to synchronize access to url.
    url      url.URL
    username string
    password string
@ -318,8 +329,8 @@ func (p *Point) String() string {
// PrecisionString returns a line-protocol string of the Point,
// with the timestamp formatted for the given precision.
-func (p *Point) PrecisionString(precison string) string {
-   return p.pt.PrecisionString(precison)
func (p *Point) PrecisionString(precision string) string {
    return p.pt.PrecisionString(precision)
}
// Name returns the measurement name of the point.
@ -356,6 +367,9 @@ func (c *client) Write(bp BatchPoints) error {
    var b bytes.Buffer

    for _, p := range bp.Points() {
        if p == nil {
            continue
        }
        if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil {
            return err
        }
@ -366,7 +380,8 @@ func (c *client) Write(bp BatchPoints) error {
    }

    u := c.url
-   u.Path = "write"
    u.Path = path.Join(u.Path, "write")

    req, err := http.NewRequest("POST", u.String(), &b)
    if err != nil {
        return err
@ -396,7 +411,7 @@ func (c *client) Write(bp BatchPoints) error {
    }

    if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK {
-       var err = fmt.Errorf(string(body))
        var err = errors.New(string(body))
        return err
    }
@ -407,6 +422,7 @@ func (c *client) Write(bp BatchPoints) error {
type Query struct {
    Command         string
    Database        string
    RetentionPolicy string
    Precision       string
    Chunked         bool
    ChunkSize       int
@ -424,6 +440,19 @@ func NewQuery(command, database, precision string) Query {
    }
}
// NewQueryWithRP returns a query object.
// The database, retention policy, and precision arguments can be empty strings if they are not needed
// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater.
func NewQueryWithRP(command, database, retentionPolicy, precision string) Query {
    return Query{
        Command:         command,
        Database:        database,
        RetentionPolicy: retentionPolicy,
        Precision:       precision,
        Parameters:      make(map[string]interface{}),
    }
}
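Usage sketch (database and policy names illustrative): query a non-default retention policy without qualifying every measurement in the statement.

q := client.NewQueryWithRP(`SELECT mean("value") FROM "cpu"`, "mydb", "one_week", "s")
resp, err := c.Query(q)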
// NewQueryWithParameters returns a query object.
// The database and precision arguments can be empty strings if they are not needed for the query.
// parameters is a map of the parameter names used in the command to their values.
@ -446,11 +475,11 @@ type Response struct {
// It returns nil if no errors occurred on any statements.
func (r *Response) Error() error {
    if r.Err != "" {
-       return fmt.Errorf(r.Err)
        return errors.New(r.Err)
    }
    for _, result := range r.Results {
        if result.Err != "" {
-           return fmt.Errorf(result.Err)
            return errors.New(result.Err)
        }
    }
    return nil
@ -471,55 +500,37 @@ type Result struct {
// Query sends a command to the server and returns the Response.
func (c *client) Query(q Query) (*Response, error) {
-   u := c.url
-   u.Path = "query"
-
-   jsonParameters, err := json.Marshal(q.Parameters)
-   if err != nil {
-       return nil, err
-   }
-
-   req, err := http.NewRequest("POST", u.String(), nil)
    req, err := c.createDefaultRequest(q)
    if err != nil {
        return nil, err
    }
-   req.Header.Set("Content-Type", "")
-   req.Header.Set("User-Agent", c.useragent)
-   if c.username != "" {
-       req.SetBasicAuth(c.username, c.password)
-   }
    params := req.URL.Query()
-   params.Set("q", q.Command)
-   params.Set("db", q.Database)
-   params.Set("params", string(jsonParameters))
    if q.Chunked {
        params.Set("chunked", "true")
        if q.ChunkSize > 0 {
            params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
        }
-   }
-   if q.Precision != "" {
-       params.Set("epoch", q.Precision)
-   }
        req.URL.RawQuery = params.Encode()
    }

    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    if err := checkResponse(resp); err != nil {
        return nil, err
    }

    var response Response
    if q.Chunked {
        cr := NewChunkedResponse(resp.Body)
        for {
            r, err := cr.NextResponse()
            if err != nil {
                if err == io.EOF {
                    break
                }
                // If we got an error while decoding the response, send that back.
                return nil, err
            }
@ -548,19 +559,108 @@ func (c *client) Query(q Query) (*Response, error) {
            return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr)
        }
    }

    // If we don't have an error in our json response, and didn't get statusOK
    // then send back an error
    if resp.StatusCode != http.StatusOK && response.Error() == nil {
-       return &response, fmt.Errorf("received status code %d from server",
-           resp.StatusCode)
        return &response, fmt.Errorf("received status code %d from server", resp.StatusCode)
    }
    return &response, nil
}
// QueryAsChunk sends a command to the server and returns the Response.
func (c *client) QueryAsChunk(q Query) (*ChunkedResponse, error) {
    req, err := c.createDefaultRequest(q)
    if err != nil {
        return nil, err
    }
    params := req.URL.Query()
    params.Set("chunked", "true")
    if q.ChunkSize > 0 {
        params.Set("chunk_size", strconv.Itoa(q.ChunkSize))
    }
    req.URL.RawQuery = params.Encode()
    resp, err := c.httpClient.Do(req)
    if err != nil {
        return nil, err
    }

    if err := checkResponse(resp); err != nil {
        return nil, err
    }
    return NewChunkedResponse(resp.Body), nil
}
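A hedged streaming sketch built on the two additions above (QueryAsChunk plus the io.EOF contract of NextResponse); assumes c is a client.Client and the usual io/log/fmt imports:

cr, err := c.QueryAsChunk(client.NewQuery(`SELECT * FROM "cpu"`, "mydb", "ns"))
if err != nil {
    log.Fatal(err)
}
defer cr.Close() // releases the underlying HTTP body
for {
    resp, err := cr.NextResponse()
    if err == io.EOF {
        break
    }
    if err != nil {
        log.Fatal(err)
    }
    for _, result := range resp.Results {
        fmt.Println(len(result.Series), "series in chunk")
    }
}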
func checkResponse(resp *http.Response) error {
    // If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb
    // but instead some other service. If the error code is also a 500+ code, then some
    // downstream loadbalancer/proxy/etc had an issue and we should report that.
    if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError {
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil || len(body) == 0 {
            return fmt.Errorf("received status code %d from downstream server", resp.StatusCode)
        }
        return fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body)
    }

    // If we get an unexpected content type, then it is also not from influx direct and therefore
    // we want to know what we received and what status code was returned for debugging purposes.
    if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" {
        // Read up to 1kb of the body to help identify downstream errors and limit the impact of things
        // like downstream serving a large file
        body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024))
        if err != nil || len(body) == 0 {
            return fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode)
        }
        return fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body)
    }
    return nil
}
func (c *client) createDefaultRequest(q Query) (*http.Request, error) {
    u := c.url
    u.Path = path.Join(u.Path, "query")

    jsonParameters, err := json.Marshal(q.Parameters)
    if err != nil {
        return nil, err
    }

    req, err := http.NewRequest("POST", u.String(), nil)
    if err != nil {
        return nil, err
    }

    req.Header.Set("Content-Type", "")
    req.Header.Set("User-Agent", c.useragent)

    if c.username != "" {
        req.SetBasicAuth(c.username, c.password)
    }

    params := req.URL.Query()
    params.Set("q", q.Command)
    params.Set("db", q.Database)
    if q.RetentionPolicy != "" {
        params.Set("rp", q.RetentionPolicy)
    }
    params.Set("params", string(jsonParameters))

    if q.Precision != "" {
        params.Set("epoch", q.Precision)
    }
    req.URL.RawQuery = params.Encode()

    return req, nil
}
// duplexReader reads responses and writes it to another writer while
// satisfying the reader interface.
type duplexReader struct {
-   r io.Reader
    r io.ReadCloser
    w io.Writer
}
@ -572,6 +672,11 @@ func (r *duplexReader) Read(p []byte) (n int, err error) {
    return n, err
}

// Close closes the response.
func (r *duplexReader) Close() error {
    return r.r.Close()
}
// ChunkedResponse represents a response from the server that
// uses chunking to stream the output.
type ChunkedResponse struct {
@ -582,8 +687,12 @@ type ChunkedResponse struct {
// NewChunkedResponse reads a stream and produces responses from the stream.
func NewChunkedResponse(r io.Reader) *ChunkedResponse {
    rc, ok := r.(io.ReadCloser)
    if !ok {
        rc = ioutil.NopCloser(r)
    }
    resp := &ChunkedResponse{}
-   resp.duplex = &duplexReader{r: r, w: &resp.buf}
    resp.duplex = &duplexReader{r: rc, w: &resp.buf}
    resp.dec = json.NewDecoder(resp.duplex)
    resp.dec.UseNumber()
    return resp
@ -592,10 +701,9 @@ func NewChunkedResponse(r io.Reader) *ChunkedResponse {
// NextResponse reads the next line of the stream and returns a response.
func (r *ChunkedResponse) NextResponse() (*Response, error) {
    var response Response

    if err := r.dec.Decode(&response); err != nil {
        if err == io.EOF {
-           return nil, nil
            return nil, err
        }
        // A decoding error happened. This probably means the server crashed
        // and sent a last-ditch error message to us. Ensure we have read the
@ -607,3 +715,8 @@ func (r *ChunkedResponse) NextResponse() (*Response, error) {
    r.buf.Reset()
    return &response, nil
}

// Close closes the response.
func (r *ChunkedResponse) Close() error {
    return r.duplex.Close()
}

View file

@ -107,6 +107,10 @@ func (uc *udpclient) Query(q Query) (*Response, error) {
    return nil, fmt.Errorf("Querying via UDP is not supported")
}

func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) {
    return nil, fmt.Errorf("Querying via UDP is not supported")
}

func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) {
    return 0, "", nil
}

View file

@ -0,0 +1,29 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.12

package prometheus

import "runtime/debug"

// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+.
func readBuildInfo() (path, version, sum string) {
    path, version, sum = "unknown", "unknown", "unknown"
    if bi, ok := debug.ReadBuildInfo(); ok {
        path = bi.Main.Path
        version = bi.Main.Version
        sum = bi.Main.Sum
    }
    return
}

View file

@ -0,0 +1,22 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.12

package prometheus

// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before
// 1.12. Remove this whole file once the minimum supported Go version is 1.12.
func readBuildInfo() (path, version, sum string) {
    return "unknown", "unknown", "unknown"
}

View file

@ -29,27 +29,72 @@ type Collector interface {
    // collected by this Collector to the provided channel and returns once
    // the last descriptor has been sent. The sent descriptors fulfill the
    // consistency and uniqueness requirements described in the Desc
-   // documentation. (It is valid if one and the same Collector sends
-   // duplicate descriptors. Those duplicates are simply ignored. However,
-   // two different Collectors must not send duplicate descriptors.) This
-   // method idempotently sends the same descriptors throughout the
-   // lifetime of the Collector. If a Collector encounters an error while
-   // executing this method, it must send an invalid descriptor (created
-   // with NewInvalidDesc) to signal the error to the registry.
    // documentation.
    //
    // It is valid if one and the same Collector sends duplicate
    // descriptors. Those duplicates are simply ignored. However, two
    // different Collectors must not send duplicate descriptors.
    //
    // Sending no descriptor at all marks the Collector as “unchecked”,
    // i.e. no checks will be performed at registration time, and the
    // Collector may yield any Metric it sees fit in its Collect method.
    //
    // This method idempotently sends the same descriptors throughout the
    // lifetime of the Collector. It may be called concurrently and
    // therefore must be implemented in a concurrency safe way.
    //
    // If a Collector encounters an error while executing this method, it
    // must send an invalid descriptor (created with NewInvalidDesc) to
    // signal the error to the registry.
    Describe(chan<- *Desc)
    // Collect is called by the Prometheus registry when collecting
    // metrics. The implementation sends each collected metric via the
    // provided channel and returns once the last metric has been sent. The
-   // descriptor of each sent metric is one of those returned by
-   // Describe. Returned metrics that share the same descriptor must differ
-   // in their variable label values. This method may be called
-   // concurrently and must therefore be implemented in a concurrency safe
-   // way. Blocking occurs at the expense of total performance of rendering
-   // all registered metrics. Ideally, Collector implementations support
-   // concurrent readers.
    // descriptor of each sent metric is one of those returned by Describe
    // (unless the Collector is unchecked, see above). Returned metrics that
    // share the same descriptor must differ in their variable label
    // values.
    //
    // This method may be called concurrently and must therefore be
    // implemented in a concurrency safe way. Blocking occurs at the expense
    // of total performance of rendering all registered metrics. Ideally,
    // Collector implementations support concurrent readers.
    Collect(chan<- Metric)
}
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
//   func (c customCollector) Describe(ch chan<- *Desc) {
//       DescribeByCollect(c, ch)
//   }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
    metrics := make(chan Metric)
    go func() {
        c.Collect(metrics)
        close(metrics)
    }()
    for m := range metrics {
        descs <- m.Desc()
    }
}
// selfCollector implements Collector for a single Metric so that the Metric // selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements // collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument. // Metric, and call init with the Metric itself as an argument.

View file

@ -15,6 +15,10 @@ package prometheus
import (
    "errors"
    "math"
    "sync/atomic"

    dto "github.com/prometheus/client_model/go"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -42,6 +46,14 @@ type Counter interface {
type CounterOpts Opts

// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
// as a uint64. This allows atomic increments of the counter with optimal
// performance. (It is common to have an Inc call in very hot execution paths.)
// Both internal tracking values are added up in the Write method. This has to
// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
    desc := NewDesc(
        BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -49,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
        nil,
        opts.ConstLabels,
    )
-   result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
    result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
    result.init(result) // Init self-collection.
    return result
}
type counter struct {
-   value
    // valBits contains the bits of the represented float64 value, while
    // valInt stores values that are exact integers. Both have to go first
    // in the struct to guarantee alignment for atomic operations.
    // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
    valBits uint64
    valInt  uint64

    selfCollector
    desc *Desc

    labelPairs []*dto.LabelPair
}

func (c *counter) Desc() *Desc {
    return c.desc
}

func (c *counter) Add(v float64) {
    if v < 0 {
        panic(errors.New("counter cannot decrease in value"))
    }
-   c.value.Add(v)
    ival := uint64(v)
    if float64(ival) == v {
        atomic.AddUint64(&c.valInt, ival)
        return
    }

    for {
        oldBits := atomic.LoadUint64(&c.valBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
        if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
            return
        }
    }
}

func (c *counter) Inc() {
    atomic.AddUint64(&c.valInt, 1)
}

func (c *counter) Write(out *dto.Metric) error {
    fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
    ival := atomic.LoadUint64(&c.valInt)
    val := fval + float64(ival)

    return populateMetric(CounterValue, val, c.labelPairs, out)
}
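The observable effect of the two-track design described in the NewCounter comment (values illustrative):

c := prometheus.NewCounter(prometheus.CounterOpts{
    Name: "ops_total",
    Help: "Total operations.",
})
c.Inc()    // hot path: a single atomic uint64 add
c.Add(2)   // integral value: also the uint64 track
c.Add(0.5) // fractional value: CAS loop on the float64 track
// A scrape now reports 3.5, the sum of both tracks.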
// CounterVec is a Collector that bundles a set of Counters that all share the // CounterVec is a Collector that bundles a set of Counters that all share the
@ -70,16 +120,12 @@ func (c *counter) Add(v float64) {
// if you want to count the same thing partitioned by various dimensions // if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and // (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec. // method). Create instances with NewCounterVec.
//
// CounterVec embeds MetricVec. See there for a full list of methods with
// detailed documentation.
type CounterVec struct { type CounterVec struct {
*MetricVec *metricVec
} }
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names. At least one label name must be // partitioned by the given label names.
// provided.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc( desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@ -88,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels, opts.ConstLabels,
) )
return &CounterVec{ return &CounterVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric { metricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{ if len(lvs) != len(desc.variableLabels) {
desc: desc, panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
valType: CounterValue, }
labelPairs: makeLabelPairs(desc, lvs), result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
}}
result.init(result) // Init self-collection. result.init(result) // Init self-collection.
return result return result
}), }),
} }
} }
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
-   metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
// create the new Counter but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Counter for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Counter from the CounterVec. In that case,
// the Counter will still exist, but it will not be exported anymore, even if a
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
    metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
    if metric != nil {
        return metric.(Counter), err
    }
    return nil, err
}

-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
-   metric, err := m.MetricVec.GetMetricWith(labels)
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
    metric, err := v.metricVec.getMetricWith(labels)
    if metric != nil {
        return metric.(Counter), err
    }
@ -123,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
-   return m.MetricVec.WithLabelValues(lvs...).(Counter)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
    c, err := v.GetMetricWithLabelValues(lvs...)
    if err != nil {
        panic(err)
    }
    return c
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
-   return m.MetricVec.With(labels).(Counter)
// returned an error. Not returning an error allows shortcuts like
//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
    c, err := v.GetMetricWith(labels)
    if err != nil {
        panic(err)
    }
    return c
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the CounterVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
    vec, err := v.curryWith(labels)
    if vec != nil {
        return &CounterVec{vec}, err
    }
    return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
    vec, err := v.CurryWith(labels)
    if err != nil {
        panic(err)
    }
    return vec
}
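Currying in practice (metric and label names illustrative):

reqs := prometheus.NewCounterVec(
    prometheus.CounterOpts{Name: "http_requests_total", Help: "Requests."},
    []string{"handler", "method"},
)
apiReqs := reqs.MustCurryWith(prometheus.Labels{"handler": "api"})
apiReqs.WithLabelValues("GET").Inc() // same as reqs.WithLabelValues("api", "GET")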
// CounterFunc is a Counter whose value is determined at collect time by calling a // CounterFunc is a Counter whose value is determined at collect time by calling a

View file

@ -25,19 +25,6 @@ import (
    dto "github.com/prometheus/client_model/go"
)

-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-//     myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
@ -80,24 +67,19 @@ type Desc struct {
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
-// be nil if no such labels should be set. fqName and help must not be empty.
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
    d := &Desc{
        fqName:         fqName,
        help:           help,
        variableLabels: variableLabels,
    }
-   if help == "" {
-       d.err = errors.New("empty help string")
-       return d
-   }
    if !model.IsValidMetricName(model.LabelValue(fqName)) {
        d.err = fmt.Errorf("%q is not a valid metric name", fqName)
        return d
@ -111,7 +93,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
    // First add only the const label names and sort them...
    for labelName := range constLabels {
        if !checkLabelName(labelName) {
-           d.err = fmt.Errorf("%q is not a valid label name", labelName)
            d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
            return d
        }
        labelNames = append(labelNames, labelName)
@ -122,12 +104,18 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
    for _, labelName := range labelNames {
        labelValues = append(labelValues, constLabels[labelName])
    }
    // Validate the const label values. They can't have a wrong cardinality, so
    // use in len(labelValues) as expectedNumberOfValues.
    if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
        d.err = err
        return d
    }
    // Now add the variable label names, but prefix them with something that
    // cannot be in a regular label name. That prevents matching the label
    // dimension with a different mix between preset and variable labels.
    for _, labelName := range variableLabels {
        if !checkLabelName(labelName) {
-           d.err = fmt.Errorf("%q is not a valid label name", labelName)
            d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
            return d
        }
        labelNames = append(labelNames, "$"+labelName)
@ -137,6 +125,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
        d.err = errors.New("duplicate label names")
        return d
    }

    vh := hashNew()
    for _, val := range labelValues {
        vh = hashAdd(vh, val)
@ -163,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
            Value: proto.String(v),
        })
    }
-   sort.Sort(LabelPairSorter(d.constLabelPairs))
    sort.Sort(labelPairSorter(d.constLabelPairs))
    return d
}
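A small NewDesc sketch reflecting the relaxed contract (names illustrative); help may now be empty, though supplying one remains good practice:

desc := prometheus.NewDesc(
    "myapp_queue_depth",                // fqName
    "Current depth of the work queue.", // help
    []string{"queue"},                  // variable labels
    prometheus.Labels{"shard": "0"},    // const labels
)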
@ -193,8 +182,3 @@ func (d *Desc) String() string {
        d.variableLabels,
    )
}

-func checkLabelName(l string) bool {
-   return model.LabelName(l).IsValid() &&
-       !strings.HasPrefix(l, reservedLabelPrefix)
-}

View file

@ -11,10 +11,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
// Package prometheus is the core instrumentation package. It provides metrics
// primitives to instrument code for monitoring. It also offers a registry for
// metrics. Sub-packages allow to expose the registered metrics via HTTP
// (package promhttp) or push them to a Pushgateway (package push). There is
// also a sub-package promauto, which provides metrics constructors with
// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
@ -26,6 +28,7 @@
//    package main
//
//    import (
//        "log"
//        "net/http"
//
//        "github.com/prometheus/client_golang/prometheus"
@ -71,7 +74,10 @@
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
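
For illustration (not part of the vendored file): a minimal sketch of the
promauto registration mentioned above, assuming the
github.com/prometheus/client_golang/prometheus/promauto sub-package; the
metric name is illustrative.

	var requestCount = promauto.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total", // illustrative
		Help: "Total number of handled requests.",
	})
	// requestCount is created and registered with the default registry in
	// one step; promauto.NewCounter panics if registration fails.
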
@@ -115,7 +121,17 @@
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
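
A hedged sketch of such an “unchecked” Collector (the metric name and value
are illustrative; assumes the prometheus package is imported):

	type uncheckedCollector struct{}

	// Sending no Desc marks the Collector as unchecked: nothing is verified
	// at registration time, consistency is only enforced at scrape time.
	func (uncheckedCollector) Describe(chan<- *prometheus.Desc) {}

	func (uncheckedCollector) Collect(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc("dynamic_value", "A value whose shape is only known at scrape time.", nil, nil),
			prometheus.GaugeValue, 42,
		)
	}
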
@@ -144,7 +160,7 @@
// registry.
//
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
@@ -152,11 +168,11 @@
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
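
A minimal sketch of the custom-registry flow described above (myCounter is an
illustrative, already-created Collector; assumes the prometheus and promhttp
packages are imported):

	reg := prometheus.NewRegistry()
	reg.MustRegister(myCounter) // Instead of the global prometheus.MustRegister.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
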
@@ -167,7 +183,6 @@
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
// (The top-level functions in the prometheus package are deprecated.)
//
// Pushing to the Pushgateway
//


@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.


@@ -13,6 +13,14 @@
package prometheus

import (
	"math"
	"sync/atomic"
	"time"

	dto "github.com/prometheus/client_model/go"
)

// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@@ -48,13 +56,74 @@ type Gauge interface {
type GaugeOpts Opts

// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
		opts.Help,
		nil,
		opts.ConstLabels,
	)
	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
	result.init(result) // Init self-collection.
	return result
}

type gauge struct {
	// valBits contains the bits of the represented float64 value. It has
	// to go first in the struct to guarantee alignment for atomic
	// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	valBits uint64

	selfCollector

	desc       *Desc
	labelPairs []*dto.LabelPair
}

func (g *gauge) Desc() *Desc {
	return g.desc
}

func (g *gauge) Set(val float64) {
	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}

func (g *gauge) SetToCurrentTime() {
	g.Set(float64(time.Now().UnixNano()) / 1e9)
}

func (g *gauge) Inc() {
	g.Add(1)
}

func (g *gauge) Dec() {
	g.Add(-1)
}

func (g *gauge) Add(val float64) {
	for {
		oldBits := atomic.LoadUint64(&g.valBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
			return
		}
	}
}

func (g *gauge) Sub(val float64) {
	g.Add(val * -1)
}

func (g *gauge) Write(out *dto.Metric) error {
	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
	return populateMetric(GaugeValue, val, g.labelPairs, out)
}
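
For illustration (not part of the vendored file), how the trade-off described
in the NewGauge comment plays out in use; the metric name is illustrative:

	queueDepth := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "myapp_queue_depth",
		Help: "Current number of queued items.",
	})
	queueDepth.Set(17)  // One atomic store of the float64 bits: the fast path.
	queueDepth.Inc()    // Goes through the CompareAndSwap loop in Add above.
	queueDepth.Sub(3.5) // Gauges may move by arbitrary float64 deltas.
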
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -63,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
	*metricVec
}

// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -77,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
		opts.ConstLabels,
	)
	return &GaugeVec{
		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
			if len(lvs) != len(desc.variableLabels) {
				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
			}
			result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
			result.init(result) // Init self-collection.
			return result
		}),
	}
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
// create the new Gauge but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Gauge for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
// Gauge will still exist, but it will not be exported anymore, even if a
// Gauge with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Gauge), err
	}
	return nil, err
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
	metric, err := v.metricVec.getMetricWith(labels)
	if metric != nil {
		return metric.(Gauge), err
	}
@@ -106,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
	g, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return g
}

// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
	g, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return g
}

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the GaugeVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &GaugeVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}
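
A short usage sketch for the currying above (metric and label names are
illustrative):

	opsQueued := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "ops_queued", Help: "Queued operations."},
		[]string{"user", "type"},
	)
	// Pre-set the "user" label; the curried vector only needs "type".
	alice := opsQueued.MustCurryWith(prometheus.Labels{"user": "alice"})
	alice.WithLabelValues("mutate").Inc()
	// Updates the same series as opsQueued.WithLabelValues("alice", "mutate").
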

// GaugeFunc is a Gauge whose value is determined at collect time by calling a


@@ -1,9 +1,22 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus

import (
	"runtime"
	"runtime/debug"
	"sync"
	"time"
)
@@ -11,13 +24,43 @@ type goCollector struct {
	goroutinesDesc *Desc
	threadsDesc    *Desc
	gcDesc         *Desc
	goInfoDesc     *Desc

	// ms... are memstats related.
	msLast          *runtime.MemStats // Previously collected memstats.
	msLastTimestamp time.Time
	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
	msMetrics       memStatsMetrics
	msRead          func(*runtime.MemStats) // For mocking in tests.
	msMaxWait       time.Duration           // Wait time for fresh memstats.
	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
}

// NewGoCollector returns a collector that exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This requires to “stop the world”, which usually only happens for
// garbage collection (GC). Take the following implications into account when
// deciding whether to use the Go collector:
//
// 1. The performance impact of stopping the world is the more relevant the more
// frequently metrics are collected. However, with Go1.9 or later the
// stop-the-world time per metrics collection is very short (~25µs) so that the
// performance impact will only matter in rare cases. However, with older Go
// versions, the stop-the-world duration depends on the heap size and can be
// quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
//
// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the
// metrics collection happens to coincide with GC, it will only complete after
// GC has finished. Usually, GC is fast enough to not cause problems. However,
// with a very large heap, GC might take multiple seconds, which is enough to
// cause scrape timeouts in common setups. To avoid this problem, the Go
// collector will use the memstats from a previous collection if
// runtime.ReadMemStats takes more than 1s. However, if there are no previously
// collected memstats, or their collection is more than 5m ago, the collection
// will block until runtime.ReadMemStats succeeds. (The problem might be solved
// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go
// issue.)
func NewGoCollector() Collector {
	return &goCollector{
		goroutinesDesc: NewDesc(
@@ -26,13 +69,21 @@ func NewGoCollector() Collector {
			nil, nil),
		threadsDesc: NewDesc(
			"go_threads",
			"Number of OS threads created.",
			nil, nil),
		gcDesc: NewDesc(
			"go_gc_duration_seconds",
			"A summary of the GC invocation durations.",
			nil, nil),
		goInfoDesc: NewDesc(
			"go_info",
			"Information about the Go environment.",
			nil, Labels{"version": runtime.Version()}),
		msLast:    &runtime.MemStats{},
		msRead:    runtime.ReadMemStats,
		msMaxWait: time.Second,
		msMaxAge:  5 * time.Minute,
		msMetrics: memStatsMetrics{
			{
				desc: NewDesc(
					memstatNamespace("alloc_bytes"),
@@ -231,7 +282,7 @@
}

func memstatNamespace(s string) string {
	return "go_memstats_" + s
}

// Describe returns all descriptions of the collector.
@@ -239,13 +290,28 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
	ch <- c.goroutinesDesc
	ch <- c.threadsDesc
	ch <- c.gcDesc
	ch <- c.goInfoDesc
	for _, i := range c.msMetrics {
		ch <- i.desc
	}
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	var (
		ms   = &runtime.MemStats{}
		done = make(chan struct{})
	)
	// Start reading memstats first as it might take a while.
	go func() {
		c.msRead(ms)
		c.msMtx.Lock()
		c.msLast = ms
		c.msLastTimestamp = time.Now()
		c.msMtx.Unlock()
		close(done)
	}()

	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
	n, _ := runtime.ThreadCreateProfile(nil)
	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -259,11 +325,35 @@ func (c *goCollector) Collect(ch chan<- Metric) {
		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
	}
	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)

	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)

	timer := time.NewTimer(c.msMaxWait)
	select {
	case <-done: // Our own ReadMemStats succeeded in time. Use it.
		timer.Stop() // Important for high collection frequencies to not pile up timers.
		c.msCollect(ch, ms)
		return
	case <-timer.C: // Time out, use last memstats if possible. Continue below.
	}
	c.msMtx.Lock()
	if time.Since(c.msLastTimestamp) < c.msMaxAge {
		// Last memstats are recent enough. Collect from them under the lock.
		c.msCollect(ch, c.msLast)
		c.msMtx.Unlock()
		return
	}
	// If we are here, the last memstats are too old or don't exist. We have
	// to wait until our own ReadMemStats finally completes. For that to
	// happen, we have to release the lock.
	c.msMtx.Unlock()
	<-done
	c.msCollect(ch, ms)
}

func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
	for _, i := range c.msMetrics {
		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
	}
}
@@ -274,3 +364,33 @@ type memStatsMetrics []struct {
	eval    func(*runtime.MemStats) float64
	valType ValueType
}

// NewBuildInfoCollector returns a collector collecting a single metric
// "go_build_info" with the constant value 1 and three labels "path", "version",
// and "checksum". Their label values contain the main module path, version, and
// checksum, respectively. The labels will only have meaningful values if the
// binary is built with Go module support and from source code retrieved from
// the source repository (rather than the local file system). This is usually
// accomplished by building from outside of GOPATH, specifying the full address
// of the main package, e.g. "GO111MODULE=on go run
// github.com/prometheus/client_golang/examples/random". If built without Go
// module support, all label values will be "unknown". If built with Go module
// support but using the source code from the local file system, the "path" will
// be set appropriately, but "checksum" will be empty and "version" will be
// "(devel)".
//
// This collector uses only the build information for the main module. See
// https://github.com/povilasv/prommod for an example of a collector for the
// module dependencies.
func NewBuildInfoCollector() Collector {
	path, version, sum := readBuildInfo()
	c := &selfCollector{MustNewConstMetric(
		NewDesc(
			"go_build_info",
			"Build information about the main Go module.",
			nil, Labels{"path": path, "version": version, "checksum": sum},
		),
		GaugeValue, 1)}
	c.init(c.self)
	return c
}
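
For illustration, both collectors from this file registered on a fresh
registry (a sketch, assuming the prometheus package is imported):

	reg := prometheus.NewRegistry()
	reg.MustRegister(
		prometheus.NewGoCollector(),        // go_goroutines, go_gc_duration_seconds, go_memstats_*, go_info, ...
		prometheus.NewBuildInfoCollector(), // go_build_info with path, version, and checksum labels.
	)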


@@ -16,7 +16,9 @@ package prometheus
import (
	"fmt"
	"math"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"

	"github.com/golang/protobuf/proto"
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
}

// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Histogram (created by joining these components with
@@ -120,29 +123,22 @@ type HistogramOpts struct {
	Subsystem string
	Name      string

	// Help provides information about this Histogram.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	ConstLabels Labels

	// Buckets defines the buckets into which observations are counted. Each
@@ -169,7 +165,7 @@ func NewHistogram(opts HistogramOpts) Histogram {
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
	if len(desc.variableLabels) != len(labelValues) {
		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
	}

	for _, n := range desc.variableLabels {
@@ -191,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
		desc:        desc,
		upperBounds: opts.Buckets,
		labelPairs:  makeLabelPairs(desc, labelValues),
		counts:      [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
	}
	for i, upperBound := range h.upperBounds {
		if i < len(h.upperBounds)-1 {
@@ -207,29 +204,55 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
			}
		}
	}
	// Finally we know the final length of h.upperBounds and can make buckets
	// for both counts:
	h.counts[0].buckets = make([]uint64, len(h.upperBounds))
	h.counts[1].buckets = make([]uint64, len(h.upperBounds))

	h.init(h) // Init self-collection.
	return h
}

type histogramCounts struct {
	// sumBits contains the bits of the float64 representing the sum of all
	// observations. sumBits and count have to go first in the struct to
	// guarantee alignment for atomic operations.
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	sumBits uint64
	count   uint64
	buckets []uint64
}

type histogram struct {
	// countAndHotIdx enables lock-free writes with use of atomic updates.
	// The most significant bit is the hot index [0 or 1] of the count field
	// below. Observe calls update the hot one. All remaining bits count the
	// number of Observe calls. Observe starts by incrementing this counter,
	// and finishes by incrementing the count field in the respective
	// histogramCounts, as a marker for completion.
	//
	// Calls of the Write method (which are non-mutating reads from the
	// perspective of the histogram) swap the hot–cold under the writeMtx
	// lock. A cooldown is awaited (while locked) by comparing the number of
	// observations with the initiation count. Once they match, then the
	// last observation on the now cool one has completed. All cool fields must
	// be merged into the new hot before releasing writeMtx.
	//
	// Fields with atomic access first! See alignment constraint:
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
	countAndHotIdx uint64

	selfCollector
	desc     *Desc
	writeMtx sync.Mutex // Only used in the Write method.

	// Two counts, one is "hot" for lock-free observations, the other is
	// "cold" for writing out a dto.Metric. It has to be an array of
	// pointers to guarantee 64bit alignment of the histogramCounts, see
	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
	counts [2]*histogramCounts

	upperBounds []float64
	labelPairs  []*dto.LabelPair
}
@@ -248,36 +271,84 @@ func (h *histogram) Observe(v float64) {
	// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
	// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
	i := sort.SearchFloat64s(h.upperBounds, v)

	// We increment h.countAndHotIdx so that the counter in the lower
	// 63 bits gets incremented. At the same time, we get the new value
	// back, which we can use to find the currently-hot counts.
	n := atomic.AddUint64(&h.countAndHotIdx, 1)
	hotCounts := h.counts[n>>63]

	if i < len(h.upperBounds) {
		atomic.AddUint64(&hotCounts.buckets[i], 1)
	}
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			break
		}
	}
	// Increment count last as we take it as a signal that the observation
	// is complete.
	atomic.AddUint64(&hotCounts.count, 1)
}

func (h *histogram) Write(out *dto.Metric) error {
	// For simplicity, we protect this whole method by a mutex. It is not in
	// the hot path, i.e. Observe is called much more often than Write. The
	// complication of making Write lock-free isn't worth it, if possible at
	// all.
	h.writeMtx.Lock()
	defer h.writeMtx.Unlock()

	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
	// without touching the count bits. See the struct comments for a full
	// description of the algorithm.
	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
	// count is contained unchanged in the lower 63 bits.
	count := n & ((1 << 63) - 1)
	// The most significant bit tells us which counts is hot. The complement
	// is thus the cold one.
	hotCounts := h.counts[n>>63]
	coldCounts := h.counts[(^n)>>63]

	// Await cooldown.
	for count != atomic.LoadUint64(&coldCounts.count) {
		runtime.Gosched() // Let observations get work done.
	}

	his := &dto.Histogram{
		Bucket:      make([]*dto.Bucket, len(h.upperBounds)),
		SampleCount: proto.Uint64(count),
		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
	}
	var cumCount uint64
	for i, upperBound := range h.upperBounds {
		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
		his.Bucket[i] = &dto.Bucket{
			CumulativeCount: proto.Uint64(cumCount),
			UpperBound:      proto.Float64(upperBound),
		}
	}

	out.Histogram = his
	out.Label = h.labelPairs

	// Finally add all the cold counts to the new hot counts and reset the cold counts.
	atomic.AddUint64(&hotCounts.count, count)
	atomic.StoreUint64(&coldCounts.count, 0)
	for {
		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
			atomic.StoreUint64(&coldCounts.sumBits, 0)
			break
		}
	}
	for i := range h.upperBounds {
		atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
		atomic.StoreUint64(&coldCounts.buckets[i], 0)
	}
	return nil
}
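
The countAndHotIdx packing used above can be demonstrated in isolation; this
standalone sketch (not part of the vendored file) mirrors the arithmetic:

	package main

	import "fmt"

	func main() {
		var countAndHotIdx uint64

		// An Observe increments the lower 63 bits and reads the hot index
		// from the most significant bit (atomics elided here).
		countAndHotIdx++
		fmt.Println("hot index:", countAndHotIdx>>63)             // 0
		fmt.Println("observations:", countAndHotIdx&((1<<63)-1)) // 1

		// A Write adds 1<<63, which flips the hot index without touching
		// the observation count in the lower 63 bits.
		countAndHotIdx += 1 << 63
		fmt.Println("hot index:", countAndHotIdx>>63)             // 1
		fmt.Println("cold index:", (^countAndHotIdx)>>63)         // 0
		fmt.Println("observations:", countAndHotIdx&((1<<63)-1)) // still 1
	}
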
@@ -287,12 +358,11 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
	*metricVec
}

// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
	desc := NewDesc(
		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -301,47 +371,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
		opts.ConstLabels,
	)
	return &HistogramVec{
		metricVec: newMetricVec(desc, func(lvs ...string) Metric {
			return newHistogram(desc, opts, lvs...)
		}),
	}
}

// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
// create the new Histogram but leave it at its starting value, a Histogram without
// any observations.
//
// Keeping the Histogram for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
// Histogram will still exist, but it will not be exported anymore, even if a
// Histogram with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
	metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}

// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
	metric, err := v.metricVec.getMetricWith(labels)
	if metric != nil {
		return metric.(Observer), err
	}
	return nil, err
}

// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
//     myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
	h, err := v.GetMetricWithLabelValues(lvs...)
	if err != nil {
		panic(err)
	}
	return h
}

// With works as GetMetricWith but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
//     myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
	h, err := v.GetMetricWith(labels)
	if err != nil {
		panic(err)
	}
	return h
}

// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the HistogramVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
	vec, err := v.curryWith(labels)
	if vec != nil {
		return &HistogramVec{vec}, err
	}
	return nil, err
}

// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
	vec, err := v.CurryWith(labels)
	if err != nil {
		panic(err)
	}
	return vec
}
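
A usage sketch tying this together (metric and label names are illustrative;
prometheus.NewTimer and prometheus.DefBuckets are part of this package):

	var latency = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "request_duration_seconds",
			Help:    "Request latency distribution.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)

	func handle(w http.ResponseWriter, r *http.Request) {
		// WithLabelValues returns an Observer, so it composes with NewTimer.
		timer := prometheus.NewTimer(latency.WithLabelValues("200", "GET"))
		defer timer.ObserveDuration()
		// ... handle the request ...
	}
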
type constHistogram struct {
@@ -393,7 +532,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
	desc *Desc,
	count uint64,
@@ -401,8 +540,11 @@ func NewConstHistogram(
	buckets map[float64]uint64,
	labelValues ...string,
) (Metric, error) {
	if desc.err != nil {
		return nil, desc.err
	}
	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
		return nil, err
	}
	return &constHistogram{
		desc: desc,

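A hedged sketch of how NewConstHistogram is typically used inside a custom
Collector's Collect method (the collector type, numbers, and labels are
illustrative):

	func (c *backendCollector) Collect(ch chan<- prometheus.Metric) {
		desc := prometheus.NewDesc(
			"backend_http_request_duration_seconds",
			"Latency histogram scraped from a backend system.",
			[]string{"backend"}, nil,
		)
		ch <- prometheus.MustNewConstHistogram(
			desc,
			4711,  // total observation count
			403.4, // sum of all observations
			map[float64]uint64{0.05: 121, 0.1: 2403, 0.5: 4500}, // cumulative bucket counts
			"api", // value for the "backend" label
		)
	}
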

@@ -1,526 +0,0 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
	"bufio"
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net"
	"net/http"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/common/expfmt"
)

// TODO(beorn7): Remove this whole file. It is a partial mirror of
// promhttp/http.go (to avoid circular import chains) where everything HTTP
// related should live. The functions here are just for avoiding
// breakage. Everything is deprecated.
const (
	contentTypeHeader     = "Content-Type"
	contentLengthHeader   = "Content-Length"
	contentEncodingHeader = "Content-Encoding"
	acceptEncodingHeader  = "Accept-Encoding"
)

var bufPool sync.Pool

func getBuf() *bytes.Buffer {
	buf := bufPool.Get()
	if buf == nil {
		return &bytes.Buffer{}
	}
	return buf.(*bytes.Buffer)
}

func giveBuf(buf *bytes.Buffer) {
	buf.Reset()
	bufPool.Put(buf)
}

// Handler returns an HTTP handler for the DefaultGatherer. It is
// already instrumented with InstrumentHandler (using "prometheus" as handler
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is not instrumented).
func Handler() http.Handler {
	return InstrumentHandler("prometheus", UninstrumentedHandler())
}

// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		mfs, err := DefaultGatherer.Gather()
		if err != nil {
			http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError)
			return
		}

		contentType := expfmt.Negotiate(req.Header)
		buf := getBuf()
		defer giveBuf(buf)
		writer, encoding := decorateWriter(req, buf)
		enc := expfmt.NewEncoder(writer, contentType)
		var lastErr error
		for _, mf := range mfs {
			if err := enc.Encode(mf); err != nil {
				lastErr = err
				http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
				return
			}
		}
		if closer, ok := writer.(io.Closer); ok {
			closer.Close()
		}
		if lastErr != nil && buf.Len() == 0 {
			// Report lastErr here; the outer err is always nil at this point.
			http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
			return
		}
		header := w.Header()
		header.Set(contentTypeHeader, string(contentType))
		header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
		if encoding != "" {
			header.Set(contentEncodingHeader, encoding)
		}
		w.Write(buf.Bytes())
	})
}

// decorateWriter wraps a writer to handle gzip compression if requested. It
// returns the decorated writer and the appropriate "Content-Encoding" header
// (which is empty if no compression is enabled).
func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {
	header := request.Header.Get(acceptEncodingHeader)
	parts := strings.Split(header, ",")
	for _, part := range parts {
		part := strings.TrimSpace(part)
		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
			return gzip.NewWriter(writer), "gzip"
		}
	}
	return writer, ""
}

var instLabels = []string{"method", "code"}

type nower interface {
	Now() time.Time
}

type nowFunc func() time.Time

func (n nowFunc) Now() time.Time {
	return n()
}

var now nower = nowFunc(func() time.Time {
	return time.Now()
})

func nowSeries(t ...time.Time) nower {
	return nowFunc(func() time.Time {
		defer func() {
			t = t[1:]
		}()
		return t[0]
	})
}

// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
// (CounterVec), http_request_duration_microseconds (Summary),
// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each
// has a constant label named "handler" with the provided handlerName as
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
// seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
// calculator requires access to the request header, it creates a race with
// any writes to the header performed during request handling.
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
// - It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
//
// Upcoming versions of this package will provide ways of instrumenting HTTP
// handlers that are more flexible and have fewer issues. Please prefer direct
// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
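
For the direct instrumentation recommended above, later client_golang versions
provide composable middleware in the promhttp sub-package; a hedged sketch
(myHandler is an illustrative http.Handler):

	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "HTTP request latencies in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(duration)
	http.Handle("/", promhttp.InstrumentHandlerDuration(duration, myHandler))
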
// InstrumentHandlerFunc wraps the given function for instrumentation. It
// otherwise works in the same way as InstrumentHandler (and shares the same
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(
		SummaryOpts{
			Subsystem:   "http",
			ConstLabels: Labels{"handler": handlerName},
			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
		},
		handlerFunc,
	)
}

// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same
// issues) but provides more flexibility (at the cost of a more complex call
// syntax). As InstrumentHandler, this function registers four metric
// collectors, but it uses the provided SummaryOpts to create them. However, the
// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced
// by "requests_total", "request_duration_microseconds", "request_size_bytes",
// and "response_size_bytes", respectively. "Help" is replaced by an appropriate
// help string. The names of the variable labels of the http_requests_total
// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code).
//
// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the
// behavior of InstrumentHandler:
//
// prometheus.InstrumentHandlerWithOpts(
// prometheus.SummaryOpts{
// Subsystem: "http",
// ConstLabels: prometheus.Labels{"handler": handlerName},
// },
// handler,
// )
//
// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it
// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,
// and all its fields are set to the equally named fields in the provided
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
// InstrumentHandler is.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
	return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}

// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares
// the same issues) but provides more flexibility (at the cost of a more complex
// call syntax). See InstrumentHandlerWithOpts for details how the provided
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
// as InstrumentHandler is.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
	reqCnt := NewCounterVec(
		CounterOpts{
			Namespace:   opts.Namespace,
			Subsystem:   opts.Subsystem,
			Name:        "requests_total",
			Help:        "Total number of HTTP requests made.",
			ConstLabels: opts.ConstLabels,
		},
		instLabels,
	)
	if err := Register(reqCnt); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqCnt = are.ExistingCollector.(*CounterVec)
		} else {
			panic(err)
		}
	}

	opts.Name = "request_duration_microseconds"
	opts.Help = "The HTTP request latencies in microseconds."
	reqDur := NewSummary(opts)
	if err := Register(reqDur); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqDur = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	opts.Name = "request_size_bytes"
	opts.Help = "The HTTP request sizes in bytes."
	reqSz := NewSummary(opts)
	if err := Register(reqSz); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			reqSz = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	opts.Name = "response_size_bytes"
	opts.Help = "The HTTP response sizes in bytes."
	resSz := NewSummary(opts)
	if err := Register(resSz); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			resSz = are.ExistingCollector.(Summary)
		} else {
			panic(err)
		}
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		now := time.Now()

		delegate := &responseWriterDelegator{ResponseWriter: w}
		out := computeApproximateRequestSize(r)

		_, cn := w.(http.CloseNotifier)
		_, fl := w.(http.Flusher)
		_, hj := w.(http.Hijacker)
		_, rf := w.(io.ReaderFrom)
		var rw http.ResponseWriter
		if cn && fl && hj && rf {
			rw = &fancyResponseWriterDelegator{delegate}
		} else {
			rw = delegate
		}
		handlerFunc(rw, r)

		elapsed := float64(time.Since(now)) / float64(time.Microsecond)

		method := sanitizeMethod(r.Method)
		code := sanitizeCode(delegate.status)
		reqCnt.WithLabelValues(method, code).Inc()
		reqDur.Observe(elapsed)
		resSz.Observe(float64(delegate.written))
		reqSz.Observe(float64(<-out))
	})
}

func computeApproximateRequestSize(r *http.Request) <-chan int {
	// Get URL length in the current goroutine to avoid a race condition.
	// A HandlerFunc that runs in parallel may modify the URL.
	s := 0
	if r.URL != nil {
		s += len(r.URL.String())
	}

	out := make(chan int, 1)

	go func() {
		s += len(r.Method)
		s += len(r.Proto)
		for name, values := range r.Header {
			s += len(name)
			for _, value := range values {
				s += len(value)
			}
		}
		s += len(r.Host)

		// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.

		if r.ContentLength != -1 {
			s += int(r.ContentLength)
		}
		out <- s
		close(out)
	}()

	return out
}

type responseWriterDelegator struct {
	http.ResponseWriter

	handler, method string
	status          int
	written         int64
	wroteHeader     bool
}

func (r *responseWriterDelegator) WriteHeader(code int) {
	r.status = code
	r.wroteHeader = true
	r.ResponseWriter.WriteHeader(code)
}

func (r *responseWriterDelegator) Write(b []byte) (int, error) {
	if !r.wroteHeader {
		r.WriteHeader(http.StatusOK)
	}
	n, err := r.ResponseWriter.Write(b)
	r.written += int64(n)
	return n, err
}

type fancyResponseWriterDelegator struct {
	*responseWriterDelegator
}

func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {
	return f.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

func (f *fancyResponseWriterDelegator) Flush() {
	f.ResponseWriter.(http.Flusher).Flush()
}

func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	return f.ResponseWriter.(http.Hijacker).Hijack()
}

func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {
	if !f.wroteHeader {
		f.WriteHeader(http.StatusOK)
	}
	n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)
	f.written += n
	return n, err
}

func sanitizeMethod(m string) string {
	switch m {
	case "GET", "get":
		return "get"
	case "PUT", "put":
		return "put"
	case "HEAD", "head":
		return "head"
	case "POST", "post":
		return "post"
	case "DELETE", "delete":
		return "delete"
	case "CONNECT", "connect":
		return "connect"
	case "OPTIONS", "options":
		return "options"
	case "NOTIFY", "notify":
		return "notify"
	default:
		return strings.ToLower(m)
	}
}

func sanitizeCode(s int) string {
	switch s {
	case 100:
		return "100"
	case 101:
		return "101"
	case 200:
		return "200"
	case 201:
		return "201"
	case 202:
		return "202"
	case 203:
		return "203"
	case 204:
		return "204"
	case 205:
		return "205"
	case 206:
		return "206"
	case 300:
		return "300"
	case 301:
		return "301"
	case 302:
		return "302"
	case 304:
		return "304"
	case 305:
		return "305"
	case 307:
		return "307"
	case 400:
		return "400"
	case 401:
		return "401"
	case 402:
		return "402"
	case 403:
		return "403"
	case 404:
		return "404"
	case 405:
		return "405"
	case 406:
		return "406"
	case 407:
		return "407"
	case 408:
		return "408"
	case 409:
		return "409"
	case 410:
		return "410"
	case 411:
		return "411"
	case 412:
		return "412"
	case 413:
		return "413"
	case 414:
		return "414"
	case 415:
		return "415"
	case 416:
		return "416"
	case 417:
		return "417"
	case 418:
		return "418"
	case 500:
		return "500"
	case 501:
		return "501"
	case 502:
		return "502"
	case 503:
		return "503"
	case 504:
		return "504"
	case 505:
		return "505"
	case 428:
		return "428"
	case 429:
		return "429"
	case 431:
		return "431"
	case 511:
		return "511"
	default:
		return strconv.Itoa(s)
	}
}

View file

@ -0,0 +1,85 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"sort"
dto "github.com/prometheus/client_model/go"
)
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with it, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
}
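
A minimal sketch of what NormalizeMetricFamilies does on a small input; exampleNormalize is a hypothetical in-package helper, not part of the vendored file:

	package internal

	import (
		"github.com/golang/protobuf/proto"
		dto "github.com/prometheus/client_model/go"
	)

	// exampleNormalize is illustrative only.
	func exampleNormalize() []*dto.MetricFamily {
		mfA := &dto.MetricFamily{Name: proto.String("a_total"), Metric: []*dto.Metric{{}}}
		mfEmpty := &dto.MetricFamily{Name: proto.String("empty")} // no metrics
		byName := map[string]*dto.MetricFamily{"a_total": mfA, "empty": mfEmpty}
		// Returns []*dto.MetricFamily{mfA}: families sorted by name, empty
		// families pruned, and each family's Metric slice sorted by metricSorter.
		return NormalizeMetricFamilies(byName)
	}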

View file

@ -0,0 +1,87 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"strings"
"unicode/utf8"
"github.com/prometheus/common/model"
)
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
return fmt.Errorf(
"%s: %q has %d variable labels named %q but %d values %q were provided",
errInconsistentCardinality, fqName,
len(labels), labels,
len(labelValues), labelValues,
)
}
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return fmt.Errorf(
"%s: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(labels), labels,
)
}
for name, val := range labels {
if !utf8.ValidString(val) {
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
}
}
return nil
}
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
return fmt.Errorf(
"%s: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(vals), vals,
)
}
for _, val := range vals {
if !utf8.ValidString(val) {
return fmt.Errorf("label value %q is not valid UTF-8", val)
}
}
return nil
}
func checkLabelName(l string) bool {
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}
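
These validators are what a metric vector consults when labels are supplied at use time. A short sketch of the caller-visible behavior (the metric name is an arbitrary example; assumes the prometheus and log packages are imported):

	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
		[]string{"code", "method"},
	)

	// Matching cardinality: both declared labels are present.
	requests.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)

	// Mismatched cardinality: only one of two labels, so GetMetricWith returns
	// an error built on errInconsistentCardinality, as constructed above.
	if _, err := requests.GetMetricWith(prometheus.Labels{"code": "500"}); err != nil {
		log.Println(err) // inconsistent label cardinality: expected 2 label values but got 1 ...
	}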

View file

@ -15,6 +15,9 @@ package prometheus
import (
"strings"
+"time"
+
+"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
@ -43,9 +46,8 @@ type Metric interface {
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
-// recommended to sort labels lexicographically. (Implementers may find
-// LabelPairSorter useful for that.) Callers of Write should still make
-// sure of sorting if they depend on it.
+// recommended to sort labels lexicographically. Callers of Write should
+// still make sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
@ -57,8 +59,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
-// It is mandatory to set Name and Help to a non-empty string. All other fields
-// are optional and can safely be left at their zero value.
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with
@ -69,7 +72,7 @@ type Opts struct {
Subsystem string
Name string
-// Help provides information about this metric. Mandatory!
+// Help provides information about this metric.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
@ -79,20 +82,12 @@ type Opts struct {
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
-// Note that in most cases, labels have a value that varies during the
-// lifetime of a process. Those labels are usually managed with a metric
-// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
-// serve only special purposes. One is for the special case where the
-// value of a label does not change during the lifetime of a process,
-// e.g. if the revision of the running binary is put into a
-// label. Another, more advanced purpose is if more than one Collector
-// needs to collect Metrics with the same fully-qualified name. In that
-// case, those Metrics must differ in the values of their
-// ConstLabels. See the Collector examples.
-//
-// If the value of a label never changes (not even between binaries),
-// that label most likely should not be a label at all (but part of the
-// metric name).
+// ConstLabels are only used rarely. In particular, do not use them to
+// attach the same labels to all your metrics. Those use cases are
+// better covered by target labels set by the scraping Prometheus
+// server, or by one specific metric (e.g. a build_info or a
+// machine_role metric). See also
+// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
}
@ -118,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
return name
}
-// LabelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers. This is useful for implementing the Write method of
-// custom metrics.
-type LabelPairSorter []*dto.LabelPair
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair
-func (s LabelPairSorter) Len() int {
+func (s labelPairSorter) Len() int {
return len(s)
}
-func (s LabelPairSorter) Swap(i, j int) {
+func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s LabelPairSorter) Less(i, j int) bool {
+func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
-return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
-s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
-return s[i] < s[j]
-}
-
type invalidMetric struct {
desc *Desc
err error
@ -164,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+Metric
+t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+e := m.Metric.Write(pb)
+pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be rounded down to the
+// next full millisecond value.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+return timestampedMetric{Metric: m, t: t}
+}
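
A hedged sketch of the mirroring use case the doc comment names; the collector type, its fields, and the external source are illustrative, not part of the vendored code:

	func (c *mirrorCollector) Collect(ch chan<- prometheus.Metric) {
		v, sampledAt := c.source.LastSample() // hypothetical external metric source
		ch <- prometheus.NewMetricWithTimestamp(
			sampledAt, // exposed (rounded down to milliseconds) instead of scrape time
			prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, v),
		)
	}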

View file

@ -0,0 +1,52 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
type ObserverVec interface {
GetMetricWith(Labels) (Observer, error)
GetMetricWithLabelValues(lvs ...string) (Observer, error)
With(Labels) Observer
WithLabelValues(...string) Observer
CurryWith(Labels) (ObserverVec, error)
MustCurryWith(Labels) ObserverVec
Collector
}
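
The Gauge-as-Observer case mentioned above, as a short sketch (doWork and the sleep are placeholders for the code being timed):

	func doWork(lastDuration prometheus.Gauge) {
		// ObserverFunc adapts the gauge's Set method into an Observer, so the
		// timer writes the duration of this call into the gauge.
		timer := prometheus.NewTimer(prometheus.ObserverFunc(lastDuration.Set))
		defer timer.ObserveDuration()

		time.Sleep(10 * time.Millisecond) // the work being timed
	}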

View file

@ -13,46 +13,61 @@
package prometheus
-import "github.com/prometheus/procfs"
+import (
+"errors"
+"os"
+)
type processCollector struct {
-pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
+reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
-vsize, rss *Desc
+vsize, maxVsize *Desc
+rss *Desc
startTime *Desc
}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+// PidFn returns the PID of the process the collector collects metrics
+// for. It is called upon each collection. By default, the PID of the
+// current process is used, as determined on construction time by
+// calling os.Getpid().
+PidFn func() (int, error)
+// If non-empty, each of the collected metrics is prefixed by the
+// provided string and an underscore ("_").
+Namespace string
+// If true, any error encountered during collection is reported as an
+// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+// and the collected metrics will be incomplete. (Possibly, no metrics
+// will be collected at all.) While that's usually not desired, it is
+// appropriate for the common "mix-in" of process metrics, where process
+// metrics are nice to have, but failing to collect them should not
+// disrupt the collection of the remaining metrics.
+ReportErrors bool
+}
+
// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
-func NewProcessCollector(pid int, namespace string) Collector {
-return NewProcessCollectorPIDFn(
-func() (int, error) { return pid, nil },
-namespace,
-)
-}
-
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
-func NewProcessCollectorPIDFn(
-pidFn func() (int, error),
-namespace string,
-) Collector {
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time. The detailed behavior is defined by the provided
+// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
+// collector for the current process with an empty namespace string and no error
+// reporting.
+//
+// The collector only works on operating systems with a Linux-style proc
+// filesystem and on Microsoft Windows. On other operating systems, it will not
+// collect any metrics.
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
-if len(namespace) > 0 {
-ns = namespace + "_"
+if len(opts.Namespace) > 0 {
+ns = opts.Namespace + "_"
}
-c := processCollector{
-pidFn: pidFn,
-collectFn: func(chan<- Metric) {},
+c := &processCollector{
+reportErrors: opts.ReportErrors,
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
@ -73,6 +88,11 @@ func NewProcessCollectorPIDFn(
"Virtual memory size in bytes.",
nil, nil,
),
+maxVsize: NewDesc(
+ns+"process_virtual_memory_max_bytes",
+"Maximum amount of virtual memory available in bytes.",
+nil, nil,
+),
rss: NewDesc(
ns+"process_resident_memory_bytes",
"Resident memory size in bytes.",
@ -85,12 +105,23 @@ func NewProcessCollectorPIDFn(
),
}
+if opts.PidFn == nil {
+pid := os.Getpid()
+c.pidFn = func() (int, error) { return pid, nil }
+} else {
+c.pidFn = opts.PidFn
+}
+
// Set up process metric collection if supported by the runtime.
-if _, err := procfs.NewStat(); err == nil {
+if canCollectProcess() {
c.collectFn = c.processCollect
+} else {
+c.collectFn = func(ch chan<- Metric) {
+c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
}
-return &c
+return c
}
// Describe returns all descriptions of the collector.
@ -99,6 +130,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
+ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
}
@ -108,33 +140,12 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
-// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
-// client allows users to configure the error behavior.
-func (c *processCollector) processCollect(ch chan<- Metric) {
-pid, err := c.pidFn()
-if err != nil {
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+if !c.reportErrors {
return
}
-
-p, err := procfs.NewProc(pid)
-if err != nil {
-return
-}
-
-if stat, err := p.NewStat(); err == nil {
-ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
-ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
-ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
-if startTime, err := stat.StartTime(); err == nil {
-ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
-}
-}
-
-if fds, err := p.FileDescriptorsLen(); err == nil {
-ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
-}
-
-if limits, err := p.NewLimits(); err == nil {
-ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+if desc == nil {
+desc = NewInvalidDesc(err)
}
+ch <- NewInvalidMetric(desc, err)
}
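
Wiring it up, per the new ProcessCollectorOpts API in this change (the "myapp" namespace is illustrative):

	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		Namespace:    "myapp", // yields myapp_process_cpu_seconds_total, ...
		ReportErrors: true,    // failures become invalid metrics instead of silent gaps
	}))

The zero value, prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), keeps the old default behavior: current PID, no prefix, and errors ignored.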

View file

@ -0,0 +1,65 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows
package prometheus
import (
"github.com/prometheus/procfs"
)
func canCollectProcess() bool {
_, err := procfs.NewDefaultFS()
return err == nil
}
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
c.reportError(ch, nil, err)
return
}
p, err := procfs.NewProc(pid)
if err != nil {
c.reportError(ch, nil, err)
return
}
if stat, err := p.Stat(); err == nil {
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
} else {
c.reportError(ch, c.startTime, err)
}
} else {
c.reportError(ch, nil, err)
}
if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
} else {
c.reportError(ch, c.openFDs, err)
}
if limits, err := p.Limits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
} else {
c.reportError(ch, nil, err)
}
}

View file

@ -0,0 +1,112 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
func canCollectProcess() bool {
return true
}
var (
modpsapi = syscall.NewLazyDLL("psapi.dll")
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
)
type processMemoryCounters struct {
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uint64
WorkingSetSize uint64
QuotaPeakPagedPoolUsage uint64
QuotaPagedPoolUsage uint64
QuotaPeakNonPagedPoolUsage uint64
QuotaNonPagedPoolUsage uint64
PagefileUsage uint64
PeakPagefileUsage uint64
PrivateUsage uint64
}
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
mem := processMemoryCounters{}
r1, _, err := procGetProcessMemoryInfo.Call(
uintptr(handle),
uintptr(unsafe.Pointer(&mem)),
uintptr(unsafe.Sizeof(mem)),
)
if r1 != 1 {
return mem, err
}
return mem, nil
}
func getProcessHandleCount(handle windows.Handle) (uint32, error) {
var count uint32
r1, _, err := procGetProcessHandleCount.Call(
uintptr(handle),
uintptr(unsafe.Pointer(&count)),
)
if r1 != 1 {
return 0, err
}
return count, nil
}
func (c *processCollector) processCollect(ch chan<- Metric) {
h, err := windows.GetCurrentProcess()
if err != nil {
c.reportError(ch, nil, err)
return
}
var startTime, exitTime, kernelTime, userTime windows.Filetime
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
mem, err := getProcessMemoryInfo(h)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
handles, err := getProcessHandleCount(h)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
func fileTimeToSeconds(ft windows.Filetime) float64 {
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
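
A FILETIME counts 100ns ticks, with the 64-bit tick count split across two 32-bit halves; fileTimeToSeconds recombines them and divides by 1e7. A worked check (values illustrative):

	// 10,000,000 ticks x 100ns = 1 second:
	ft := windows.Filetime{HighDateTime: 0, LowDateTime: 10000000}
	secs := fileTimeToSeconds(ft) // (0<<32 + 10000000) / 1e7 = 1.0
	_ = secs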

View file

@ -0,0 +1,357 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"bufio"
"io"
"net"
"net/http"
)
const (
closeNotifier = 1 << iota
flusher
hijacker
readerFrom
pusher
)
type delegator interface {
http.ResponseWriter
Status() int
Written() int64
}
type responseWriterDelegator struct {
http.ResponseWriter
status int
written int64
wroteHeader bool
observeWriteHeader func(int)
}
func (r *responseWriterDelegator) Status() int {
return r.status
}
func (r *responseWriterDelegator) Written() int64 {
return r.written
}
func (r *responseWriterDelegator) WriteHeader(code int) {
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
if r.observeWriteHeader != nil {
r.observeWriteHeader(code)
}
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
d.written += n
return n, err
}
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
func init() {
// TODO(beorn7): Code generation would help here.
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
return d
}
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
return closeNotifierDelegator{d}
}
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
return flusherDelegator{d}
}
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
return struct {
*responseWriterDelegator
http.Flusher
http.CloseNotifier
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
return hijackerDelegator{d}
}
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
return struct {
*responseWriterDelegator
http.Hijacker
http.CloseNotifier
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
}{d, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
return readerFromDelegator{d}
}
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
return struct {
*responseWriterDelegator
io.ReaderFrom
http.CloseNotifier
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
}{d, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
//lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to
//remove support from client_golang yet.
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}
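
The bitmask keeps delegator construction allocation-cheap: each optional interface contributes one bit, and pickDelegator holds a prebuilt constructor for each of the 32 combinations. For instance, a ResponseWriter that is both a Flusher and a Pusher yields id = flusher + pusher = 2 + 16 = 18. A sketch of how instrumentation inside this package consumes the delegator (w, r, and next are assumed to be in scope):

	d := newDelegator(w, nil) // nil: no WriteHeader observation needed here
	next.ServeHTTP(d, r)
	code := d.Status()  // status the handler wrote (0 if it wrote neither header nor body)
	size := d.Written() // response body bytes written
	_, _ = code, size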

View file

@ -11,31 +11,34 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-//
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
//
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
+//
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
package promhttp
import (
-"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"strings"
"sync"
+"time"
"github.com/prometheus/common/expfmt"
@ -44,99 +47,204 @@ import (
const (
contentTypeHeader = "Content-Type"
-contentLengthHeader = "Content-Length"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
-var bufPool sync.Pool
-
-func getBuf() *bytes.Buffer {
-buf := bufPool.Get()
-if buf == nil {
-return &bytes.Buffer{}
-}
-return buf.(*bytes.Buffer)
-}
-
-func giveBuf(buf *bytes.Buffer) {
-buf.Reset()
-bufPool.Put(buf)
-}
+var gzipPool = sync.Pool{
+New: func() interface{} {
+return gzip.NewWriter(nil)
+},
+}
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
//
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
func Handler() http.Handler {
-return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+return InstrumentMetricHandler(
+prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+)
}
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
-return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+var (
+inFlightSem chan struct{}
+errCnt = prometheus.NewCounterVec(
+prometheus.CounterOpts{
+Name: "promhttp_metric_handler_errors_total",
+Help: "Total number of internal errors encountered by the promhttp metric handler.",
+},
+[]string{"cause"},
+)
+)
+
+if opts.MaxRequestsInFlight > 0 {
+inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+}
+if opts.Registry != nil {
+// Initialize all possibilities that can occur below.
+errCnt.WithLabelValues("gathering")
+errCnt.WithLabelValues("encoding")
+if err := opts.Registry.Register(errCnt); err != nil {
+if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+} else {
+panic(err)
+}
+}
+}
+
+h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+if inFlightSem != nil {
+select {
+case inFlightSem <- struct{}{}: // All good, carry on.
+defer func() { <-inFlightSem }()
+default:
+http.Error(rsp, fmt.Sprintf(
+"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+), http.StatusServiceUnavailable)
+return
+}
+}
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
}
+errCnt.WithLabelValues("gathering").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
if len(mfs) == 0 {
-http.Error(w, "No metrics gathered, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+// Still report the error if no metrics have been gathered.
+httpError(rsp, err)
return
}
case HTTPErrorOnError:
-http.Error(w, "An error has occurred during metrics gathering:\n\n"+err.Error(), http.StatusInternalServerError)
+httpError(rsp, err)
return
}
}
contentType := expfmt.Negotiate(req.Header)
-buf := getBuf()
-defer giveBuf(buf)
-writer, encoding := decorateWriter(req, buf, opts.DisableCompression)
-enc := expfmt.NewEncoder(writer, contentType)
+header := rsp.Header()
+header.Set(contentTypeHeader, string(contentType))
+
+w := io.Writer(rsp)
+if !opts.DisableCompression && gzipAccepted(req.Header) {
+header.Set(contentEncodingHeader, "gzip")
+gz := gzipPool.Get().(*gzip.Writer)
+defer gzipPool.Put(gz)
+
+gz.Reset(w)
+defer gz.Close()
+
+w = gz
+}
+
+enc := expfmt.NewEncoder(w, contentType)
+
var lastErr error
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
lastErr = err
if opts.ErrorLog != nil {
-opts.ErrorLog.Println("error encoding metric family:", err)
+opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
+errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
// Handled later.
case HTTPErrorOnError:
-http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError)
+httpError(rsp, err)
return
}
}
}
-if closer, ok := writer.(io.Closer); ok {
-closer.Close()
+
+if lastErr != nil {
+httpError(rsp, lastErr)
}
-if lastErr != nil && buf.Len() == 0 {
-http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
-return
-}
-header := w.Header()
-header.Set(contentTypeHeader, string(contentType))
-header.Set(contentLengthHeader, fmt.Sprint(buf.Len()))
-if encoding != "" {
-header.Set(contentEncodingHeader, encoding)
-}
-w.Write(buf.Bytes())
-// TODO(beorn7): Consider streaming serving of metrics.
})
+
+if opts.Timeout <= 0 {
+return h
+}
+return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+"Exceeded configured timeout of %v.\n",
+opts.Timeout,
+))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+cnt := prometheus.NewCounterVec(
+prometheus.CounterOpts{
+Name: "promhttp_metric_handler_requests_total",
+Help: "Total number of scrapes by HTTP status code.",
+},
+[]string{"code"},
+)
+// Initialize the most likely HTTP status codes.
+cnt.WithLabelValues("200")
+cnt.WithLabelValues("500")
+cnt.WithLabelValues("503")
+if err := reg.Register(cnt); err != nil {
+if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+cnt = are.ExistingCollector.(*prometheus.CounterVec)
+} else {
+panic(err)
+}
+}
+
+gge := prometheus.NewGauge(prometheus.GaugeOpts{
+Name: "promhttp_metric_handler_requests_in_flight",
+Help: "Current number of scrapes being served.",
+})
+if err := reg.Register(gge); err != nil {
+if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+gge = are.ExistingCollector.(prometheus.Gauge)
+} else {
+panic(err)
+}
+}
+
+return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}
// HandlerErrorHandling defines how a Handler serving metrics will handle
@ -152,9 +260,12 @@
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
// last error message in the body. Only use this in deliberate "best
-// effort" metrics collection scenarios. It is recommended to at least
-// log errors (by providing an ErrorLog in HandlerOpts) to not mask
-// errors completely.
+// effort" metrics collection scenarios. In this case, it is highly
+// recommended to provide other means of detecting errors: By setting an
+// ErrorLog in HandlerOpts, the errors are logged. By providing a
+// Registry in HandlerOpts, the exposed metrics include an error counter
+// "promhttp_metric_handler_errors_total", which can be used for
+// alerts.
ContinueOnError
// Panic upon the first error encountered (useful for "crash only" apps).
PanicOnError
@ -177,25 +288,62 @@ type HandlerOpts struct {
// logged regardless of the configured ErrorHandling provided ErrorLog
// is not nil.
ErrorHandling HandlerErrorHandling
+// If Registry is not nil, it is used to register a metric
+// "promhttp_metric_handler_errors_total", partitioned by "cause". A
+// failed registration causes a panic. Note that this error counter is
+// different from the instrumentation you get from the various
+// InstrumentHandler... helpers. It counts errors that don't necessarily
+// result in a non-2xx HTTP status code. There are two typical cases:
+// (1) Encoding errors that only happen after streaming of the HTTP body
+// has already started (and the status code 200 has been sent). This
+// should only happen with custom collectors. (2) Collection errors with
+// no effect on the HTTP status code because ErrorHandling is set to
+// ContinueOnError.
+Registry prometheus.Registerer
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
+// The number of concurrent HTTP requests is limited to
+// MaxRequestsInFlight. Additional requests are responded to with 503
+// Service Unavailable and a suitable message in the body. If
+// MaxRequestsInFlight is 0 or negative, no limit is applied.
+MaxRequestsInFlight int
+// If handling a request takes longer than Timeout, it is responded to
+// with 503 Service Unavailable and a suitable message. No timeout is
+// applied if Timeout is 0 or negative. Note that with the current
+// implementation, reaching the timeout simply ends the HTTP request as
+// described above (and even that only if sending of the body hasn't
+// started yet), while the bulk work of gathering all the metrics keeps
+// running in the background (with the eventual result to be thrown
+// away). Until the implementation is improved, it is recommended to
+// implement a separate timeout in potentially slow Collectors.
+Timeout time.Duration
}
-// decorateWriter wraps a writer to handle gzip compression if requested. It
-// returns the decorated writer and the appropriate "Content-Encoding" header
-// (which is empty if no compression is enabled).
-func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {
-if compressionDisabled {
-return writer, ""
-}
-header := request.Header.Get(acceptEncodingHeader)
-parts := strings.Split(header, ",")
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+a := header.Get(acceptEncodingHeader)
+parts := strings.Split(a, ",")
for _, part := range parts {
-part := strings.TrimSpace(part)
+part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
-return gzip.NewWriter(writer), "gzip"
+return true
}
}
-return writer, ""
+return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+rsp.Header().Del(contentEncodingHeader)
+http.Error(
+rsp,
+"An error has occurred while serving metrics:\n\n"+err.Error(),
+http.StatusInternalServerError,
+)
+}
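
Putting the new HandlerOpts knobs together (the registry and the limits are illustrative values, not requirements):

	reg := prometheus.NewRegistry()
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		Registry:            reg, // exposes promhttp_metric_handler_errors_total
		ErrorHandling:       promhttp.ContinueOnError,
		MaxRequestsInFlight: 3,                // a 4th concurrent scrape gets a 503
		Timeout:             10 * time.Second, // slow gathers are answered with a 503
	})
	http.Handle("/metrics", promhttp.InstrumentMetricHandler(reg, h))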

View file

@ -0,0 +1,219 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"crypto/tls"
"net/http"
"net/http/httptrace"
"time"
"github.com/prometheus/client_golang/prometheus"
)
// The RoundTripperFunc type is an adapter to allow the use of ordinary
// functions as RoundTrippers. If f is a function with the appropriate
// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
type RoundTripperFunc func(req *http.Request) (*http.Response, error)
// RoundTrip implements the RoundTripper interface.
func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
return rt(r)
}
// InstrumentRoundTripperInFlight is a middleware that wraps the provided
// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.RoundTripper.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
})
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
// http.RoundTripper to observe the request result with the provided CounterVec.
// The CounterVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
// and/or HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
// See ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(counter)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
}
return resp, err
})
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
code, method := checkLabels(obs)
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
}
return resp, err
})
}
// InstrumentTrace is used to offer flexibility in instrumenting the available
// httptrace.ClientTrace hook functions. Each function is passed a float64
// representing the time in seconds since the start of the HTTP request. A user
// may choose to use separately bucketed Histograms, or implement custom
// instance labels on a per-function basis.
type InstrumentTrace struct {
GotConn func(float64)
PutIdleConn func(float64)
GotFirstResponseByte func(float64)
Got100Continue func(float64)
DNSStart func(float64)
DNSDone func(float64)
ConnectStart func(float64)
ConnectDone func(float64)
TLSHandshakeStart func(float64)
TLSHandshakeDone func(float64)
WroteHeaders func(float64)
Wait100Continue func(float64)
WroteRequest func(float64)
}
// InstrumentRoundTripperTrace is a middleware that wraps the provided
// RoundTripper and reports times to hook functions provided in the
// InstrumentTrace struct. Hook functions that are not present in the provided
// InstrumentTrace struct are ignored. Times reported to the hook functions are
// time since the start of the request. Only with Go 1.9+ are those times
// guaranteed to never be negative (earlier Go versions do not use a monotonic
// clock). Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// For hook functions that receive an error as an argument, no observations are
// made in the event of a non-nil error value.
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
GotConn: func(_ httptrace.GotConnInfo) {
if it.GotConn != nil {
it.GotConn(time.Since(start).Seconds())
}
},
PutIdleConn: func(err error) {
if err != nil {
return
}
if it.PutIdleConn != nil {
it.PutIdleConn(time.Since(start).Seconds())
}
},
DNSStart: func(_ httptrace.DNSStartInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
if it.ConnectStart != nil {
it.ConnectStart(time.Since(start).Seconds())
}
},
ConnectDone: func(_, _ string, err error) {
if err != nil {
return
}
if it.ConnectDone != nil {
it.ConnectDone(time.Since(start).Seconds())
}
},
GotFirstResponseByte: func() {
if it.GotFirstResponseByte != nil {
it.GotFirstResponseByte(time.Since(start).Seconds())
}
},
Got100Continue: func() {
if it.Got100Continue != nil {
it.Got100Continue(time.Since(start).Seconds())
}
},
TLSHandshakeStart: func() {
if it.TLSHandshakeStart != nil {
it.TLSHandshakeStart(time.Since(start).Seconds())
}
},
TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
if err != nil {
return
}
if it.TLSHandshakeDone != nil {
it.TLSHandshakeDone(time.Since(start).Seconds())
}
},
WroteHeaders: func() {
if it.WroteHeaders != nil {
it.WroteHeaders(time.Since(start).Seconds())
}
},
Wait100Continue: func() {
if it.Wait100Continue != nil {
it.Wait100Continue(time.Since(start).Seconds())
}
},
WroteRequest: func(_ httptrace.WroteRequestInfo) {
if it.WroteRequest != nil {
it.WroteRequest(time.Since(start).Seconds())
}
},
}
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
return next.RoundTrip(r)
})
}
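
Hooks that are left nil are simply skipped, so a trace instrumentation can be as narrow as needed. A sketch observing only DNS timing (the histogram name, buckets, and the exampleTraceInstrumentation helper are illustrative):

func exampleTraceInstrumentation() *http.Client {
	dnsLatency := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "dns_duration_seconds",
			Help:    "Trace DNS latency histogram.",
			Buckets: []float64{.005, .01, .025, .05},
		},
		[]string{"event"},
	)
	prometheus.MustRegister(dnsLatency)

	trace := &InstrumentTrace{
		DNSStart: func(t float64) {
			dnsLatency.WithLabelValues("dns_start").Observe(t)
		},
		DNSDone: func(t float64) {
			dnsLatency.WithLabelValues("dns_done").Observe(t)
		},
	}
	return &http.Client{
		Transport: InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
}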

View file

@ -0,0 +1,447 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"errors"
"net/http"
"strconv"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus"
)
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
g.Inc()
defer g.Dec()
next.ServeHTTP(w, r)
})
}
// InstrumentHandlerDuration is a middleware that wraps the provided
// http.Handler to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
})
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
// to observe the request result with the provided CounterVec. The CounterVec
// must have zero, one, or two non-const non-curried labels. For those, the only
// allowed label names are "code" and "method". The function panics
// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
// HTTP method if the respective instance label names are present in the
// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, the Counter is not incremented.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(counter)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
counter.With(labels(code, method, r.Method, d.Status())).Inc()
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
counter.With(labels(code, method, r.Method, 0)).Inc()
})
}
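
The handler middlewares chain the same way as the client-side ones. A hedged sketch wrapping an API handler (metric names are illustrative; layers can be added or dropped as needed):

func exampleHandlerChain(apiHandler http.Handler) http.Handler {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "in_flight_requests",
		Help: "A gauge of requests currently being served.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "api_requests_total",
			Help: "A counter for requests to the wrapped handler.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "request_duration_seconds",
			Help:    "A histogram of latencies for requests.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(inFlight, counter, duration)

	return InstrumentHandlerInFlight(inFlight,
		InstrumentHandlerDuration(duration,
			InstrumentHandlerCounter(counter, apiHandler),
		),
	)
}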
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
// http.Handler to observe with the provided ObserverVec the request duration
// until the response headers are written. The ObserverVec must have zero, one,
// or two non-const non-curried labels. For those, the only allowed label names
// are "code" and "method". The function panics otherwise. The Observe method of
// the Observer in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped Handler panics before calling WriteHeader, no value is
// reported.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
})
next.ServeHTTP(d, r)
})
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
// http.Handler to observe the request size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the request size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
code, method := checkLabels(obs)
if code {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
})
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
})
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
// http.Handler to observe the response size with the provided ObserverVec. The
// ObserverVec must have zero, one, or two non-const non-curried labels. For
// those, the only allowed label names are "code" and "method". The function
// panics otherwise. The Observe method of the Observer in the ObserverVec is
// called with the response size in bytes. Partitioning happens by HTTP status
// code and/or HTTP method if the respective instance label names are present in
// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
// labels. Note that partitioning of Histograms is expensive and should be used
// judiciously.
//
// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
//
// If the wrapped Handler panics, no values are reported.
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
code, method := checkLabels(obs)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
})
}
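
For unpartitioned size observations, an ObserverVec with zero labels is enough. A small sketch, again with an illustrative metric name and bucket layout:

func exampleResponseSizeInstrumentation(next http.Handler) http.Handler {
	responseSize := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "response_size_bytes",
			Help:    "A histogram of response sizes for requests.",
			Buckets: []float64{200, 500, 900, 1500},
		},
		[]string{}, // zero labels: unpartitioned observations
	)
	prometheus.MustRegister(responseSize)
	return InstrumentHandlerResponseSize(responseSize, next)
}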
func checkLabels(c prometheus.Collector) (code bool, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
desc *prometheus.Desc
m prometheus.Metric
pm dto.Metric
lvs []string
)
// Get the Desc from the Collector.
descc := make(chan *prometheus.Desc, 1)
c.Describe(descc)
select {
case desc = <-descc:
default:
panic("no description provided by collector")
}
select {
case <-descc:
panic("more than one description provided by collector")
default:
}
close(descc)
// Create a ConstMetric with the Desc. Since we don't know how many
// variable labels there are, try for as long as it needs.
for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
}
// Write out the metric into a proto message and look at the labels.
// If the value is not the magicString, it is a constLabel, which doesn't interest us.
// If the label is curried, it doesn't interest us.
// In all other cases, only "code" or "method" is allowed.
if err := m.Write(&pm); err != nil {
panic("error checking metric for labels")
}
for _, label := range pm.Label {
name, value := label.GetName(), label.GetValue()
if value != magicString || isLabelCurried(c, name) {
continue
}
switch name {
case "code":
code = true
case "method":
method = true
default:
panic("metric partitioned with non-supported labels")
}
}
return
}
func isLabelCurried(c prometheus.Collector, label string) bool {
// This is even hackier than the label test above.
// We essentially try to curry again and see if it works.
// But for that, we need to type-convert to the two
// types we use here, ObserverVec or *CounterVec.
switch v := c.(type) {
case *prometheus.CounterVec:
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
return false
}
case prometheus.ObserverVec:
if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
return false
}
default:
panic("unsupported metric vec type")
}
return true
}
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{}
if code {
labels["code"] = sanitizeCode(status)
}
if method {
labels["method"] = sanitizeMethod(reqMethod)
}
return labels
}
func computeApproximateRequestSize(r *http.Request) int {
s := 0
if r.URL != nil {
s += len(r.URL.String())
}
s += len(r.Method)
s += len(r.Proto)
for name, values := range r.Header {
s += len(name)
for _, value := range values {
s += len(value)
}
}
s += len(r.Host)
// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
if r.ContentLength != -1 {
s += int(r.ContentLength)
}
return s
}
func sanitizeMethod(m string) string {
switch m {
case "GET", "get":
return "get"
case "PUT", "put":
return "put"
case "HEAD", "head":
return "head"
case "POST", "post":
return "post"
case "DELETE", "delete":
return "delete"
case "CONNECT", "connect":
return "connect"
case "OPTIONS", "options":
return "options"
case "NOTIFY", "notify":
return "notify"
default:
return strings.ToLower(m)
}
}
// If the wrapped http.Handler has not set a status code, i.e. the value is
// currently 0, sanitizeCode will return 200, for consistency with behavior in
// the stdlib.
func sanitizeCode(s int) string {
switch s {
case 100:
return "100"
case 101:
return "101"
case 200, 0:
return "200"
case 201:
return "201"
case 202:
return "202"
case 203:
return "203"
case 204:
return "204"
case 205:
return "205"
case 206:
return "206"
case 300:
return "300"
case 301:
return "301"
case 302:
return "302"
case 304:
return "304"
case 305:
return "305"
case 307:
return "307"
case 400:
return "400"
case 401:
return "401"
case 402:
return "402"
case 403:
return "403"
case 404:
return "404"
case 405:
return "405"
case 406:
return "406"
case 407:
return "407"
case 408:
return "408"
case 409:
return "409"
case 410:
return "410"
case 411:
return "411"
case 412:
return "412"
case 413:
return "413"
case 414:
return "414"
case 415:
return "415"
case 416:
return "416"
case 417:
return "417"
case 418:
return "418"
case 500:
return "500"
case 501:
return "501"
case 502:
return "502"
case 503:
return "503"
case 504:
return "504"
case 505:
return "505"
case 428:
return "428"
case 429:
return "429"
case 431:
return "431"
case 511:
return "511"
default:
return strconv.Itoa(s)
}
}

View file

@ -15,15 +15,22 @@ package prometheus
import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"unicode/utf8"

	"github.com/golang/protobuf/proto"
	"github.com/prometheus/common/expfmt"

	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/prometheus/internal"
)
const (
@ -35,13 +42,14 @@ const (
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (currently on Linux only, see NewProcessCollector)
// and a Go collector (see NewGoCollector, in particular the note about
// stop-the-world implication with Go versions older than 1.9) already
// registered. This approach to keep default instances as global state mirrors
// the approach of other packages in the Go standard library. Note that there
// are caveats. Change the variables with caution and only if you understand the
// consequences. Users who want to avoid global state altogether should not use
// the convenience functions and act on custom instances instead.
var (
	defaultRegistry              = NewRegistry()
	DefaultRegisterer Registerer = defaultRegistry
@ -49,7 +57,7 @@ var (
)
func init() {
	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
	MustRegister(NewGoCollector())
}
@ -65,7 +73,8 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry. Unchecked Collectors (those whose
// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@ -80,7 +89,7 @@ func NewPedanticRegistry() *Registry {
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
@ -95,8 +104,13 @@ type Registerer interface {
	// returned error is an instance of AlreadyRegisteredError, which
	// contains the previously registered Collector.
	//
	// A Collector whose Describe method does not yield any Desc is treated
	// as unchecked. Registration will always succeed. No check for
	// re-registering (see previous paragraph) is performed. Thus, the
	// caller is responsible for not double-registering the same unchecked
	// Collector, and for providing a Collector that will not cause
	// inconsistent metrics on collection. (This would lead to scrape
	// errors.)
	Register(Collector) error
	// MustRegister works like Register but registers any number of
	// Collectors and panics upon the first registration that causes an
@ -105,7 +119,9 @@ type Registerer interface {
	// Unregister unregisters the Collector that equals the Collector passed
	// in as an argument. (Two Collectors are considered equal if their
	// Describe method yields the same set of descriptors.) The function
	// returns whether a Collector was unregistered. Note that an unchecked
	// Collector cannot be unregistered (as its Describe method does not
	// yield any descriptor).
	//
	// Note that even after unregistering, it will not be possible to
	// register a new Collector that is inconsistent with the unregistered
@ -123,15 +139,23 @@ type Registerer interface {
type Gatherer interface {
	// Gather calls the Collect method of the registered Collectors and then
	// gathers the collected metrics into a lexicographically sorted slice
	// of uniquely named MetricFamily protobufs. Gather ensures that the
	// returned slice is valid and self-consistent so that it can be used
	// for valid exposition. As an exception to the strict consistency
	// requirements described for metric.Desc, Gather will tolerate
	// different sets of label names for metrics of the same metric family.
	//
	// Even if an error occurs, Gather attempts to gather as many metrics as
	// possible. Hence, if a non-nil error is returned, the returned
	// MetricFamily slice could be nil (in case of a fatal error that
	// prevented any meaningful metric collection) or contain a number of
	// MetricFamily protobufs, some of which might be incomplete, and some
	// might be missing altogether. The returned error (which might be a
	// MultiError) explains the details. Note that this is mostly useful for
	// debugging purposes. If the gathered protobufs are to be used for
	// exposition in actual monitoring, it is almost always better to not
	// expose an incomplete result and instead disregard the returned
	// MetricFamily protobufs in case the returned error is non-nil.
	Gather() ([]*dto.MetricFamily, error)
}
@ -201,6 +225,13 @@ func (errs MultiError) Error() string {
	return buf.String()
}
// Append appends the provided error if it is not nil.
func (errs *MultiError) Append(err error) {
if err != nil {
*errs = append(*errs, err)
}
}
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
// contained error as error if len(errs) is 1. In all other cases, it returns
// the MultiError directly. This is helpful for returning a MultiError in a way
@ -225,6 +256,7 @@ type Registry struct {
	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
	descIDs               map[uint64]struct{}
	dimHashesByName       map[string]uint64
	uncheckedCollectors   []Collector
	pedanticChecksEnabled bool
}
@ -242,7 +274,12 @@ func (r *Registry) Register(c Collector) error {
		close(descChan)
	}()
	r.mtx.Lock()
	defer func() {
		// Drain channel in case of premature return to not leak a goroutine.
		for range descChan {
		}
		r.mtx.Unlock()
	}()
	// Conduct various tests...
	for desc := range descChan {
@ -282,15 +319,24 @@ func (r *Registry) Register(c Collector) error {
			}
		}
	}
	// A Collector yielding no Desc at all is considered unchecked.
	if len(newDescIDs) == 0 {
		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
		return nil
	}
	if existing, exists := r.collectorsByID[collectorID]; exists {
		switch e := existing.(type) {
		case *wrappingCollector:
			return AlreadyRegisteredError{
				ExistingCollector: e.unwrapRecursively(),
				NewCollector:      c,
			}
		default:
			return AlreadyRegisteredError{
				ExistingCollector: e,
				NewCollector:      c,
			}
		}
	}
	// If the collectorID is new, but at least one of the descs existed
	// before, we are in trouble.
@ -358,31 +404,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
	var (
		checkedMetricChan   = make(chan Metric, capMetricChan)
		uncheckedMetricChan = make(chan Metric, capMetricChan)
		metricHashes        = map[uint64]struct{}{}
		wg                  sync.WaitGroup
		errs                MultiError          // The collected errors to return in the end.
		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
	)
	r.mtx.RLock()
	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
	checkedCollectors := make(chan Collector, len(r.collectorsByID))
	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
	for _, collector := range r.collectorsByID {
		checkedCollectors <- collector
	}
	for _, collector := range r.uncheckedCollectors {
		uncheckedCollectors <- collector
	}
	// In case pedantic checks are enabled, we have to copy the map before
	// giving up the RLock.
	if r.pedanticChecksEnabled {
@ -391,83 +431,218 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
			registeredDescIDs[id] = struct{}{}
		}
	}
	r.mtx.RUnlock()
	wg.Add(goroutineBudget)
	collectWorker := func() {
		for {
			select {
			case collector := <-checkedCollectors:
				collector.Collect(checkedMetricChan)
			case collector := <-uncheckedCollectors:
				collector.Collect(uncheckedMetricChan)
			default:
				return
			}
			wg.Done()
		}
	}
	// Start the first worker now to make sure at least one is running.
	go collectWorker()
	goroutineBudget--
	// Close checkedMetricChan and uncheckedMetricChan once all collectors
	// are collected.
	go func() {
		wg.Wait()
		close(checkedMetricChan)
		close(uncheckedMetricChan)
	}()
	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
	defer func() {
		if checkedMetricChan != nil {
			for range checkedMetricChan {
			}
		}
		if uncheckedMetricChan != nil {
			for range uncheckedMetricChan {
			}
		}
	}()
	// Copy the channel references so we can nil them out later to remove
	// them from the select statements below.
	cmc := checkedMetricChan
	umc := uncheckedMetricChan
	for {
		select {
		case metric, ok := <-cmc:
			if !ok {
				cmc = nil
				break
			}
			errs.Append(processMetric(
				metric, metricFamiliesByName,
				metricHashes,
				registeredDescIDs,
			))
		case metric, ok := <-umc:
			if !ok {
				umc = nil
				break
			}
			errs.Append(processMetric(
				metric, metricFamiliesByName,
				metricHashes,
				nil,
			))
		default:
			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
				// All collectors are already being worked on or
				// we have already as many goroutines started as
				// there are collectors. Do the same as above,
				// just without the default.
				select {
				case metric, ok := <-cmc:
					if !ok {
						cmc = nil
						break
					}
					errs.Append(processMetric(
						metric, metricFamiliesByName,
						metricHashes,
						registeredDescIDs,
					))
				case metric, ok := <-umc:
					if !ok {
						umc = nil
						break
					}
					errs.Append(processMetric(
						metric, metricFamiliesByName,
						metricHashes,
						nil,
					))
				}
				break
			}
			// Start more workers.
			go collectWorker()
			goroutineBudget--
			runtime.Gosched()
		}
		// Once both checkedMetricChan and uncheckedMetricChan are closed
		// and drained, the contraption above will nil out cmc and umc,
		// and then we can leave the collect loop here.
		if cmc == nil && umc == nil {
			break
		}
	}
	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
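
From the caller's side, the Gatherer contract sketched above boils down to the following; the exampleGather function is illustrative, while NewGoCollector is one of the collectors registered by default:

func exampleGather() {
	reg := NewRegistry()
	reg.MustRegister(NewGoCollector())

	mfs, err := reg.Gather()
	if err != nil {
		// mfs may still hold a partial result, which is useful for
		// debugging; for real exposition it is safer to discard it.
		fmt.Println("gathering failed:", err)
		return
	}
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), len(mf.Metric))
	}
}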
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
// Prometheus text format, and writes it to a temporary file. Upon success, the
// temporary file is renamed to the provided filename.
//
// This is intended for use with the textfile collector of the node exporter.
// Note that the node exporter expects the filename to be suffixed with ".prom".
func WriteToTextfile(filename string, g Gatherer) error {
tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return err
}
defer os.Remove(tmp.Name())
mfs, err := g.Gather()
if err != nil {
return err
}
for _, mf := range mfs {
if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
return err
}
}
if err := tmp.Close(); err != nil {
return err
}
if err := os.Chmod(tmp.Name(), 0644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)
}
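
Typical use pairs this with the node exporter's textfile collector. A hedged sketch; the path below is an assumption and has to match the directory the exporter scans (its --collector.textfile.directory flag):

func exampleWriteToTextfile() {
	reg := NewRegistry()
	reg.MustRegister(NewGoCollector())

	// The ".prom" suffix is required by the textfile collector; the
	// directory is deployment-specific.
	if err := WriteToTextfile("/var/lib/node_exporter/textfile/app.prom", reg); err != nil {
		fmt.Println("writing textfile failed:", err)
	}
}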
// processMetric is an internal helper method only used by the Gather method.
func processMetric(
metric Metric,
metricFamiliesByName map[string]*dto.MetricFamily,
metricHashes map[uint64]struct{},
registeredDescIDs map[uint64]struct{},
) error {
	desc := metric.Desc()
// Wrapped metrics collected by an unchecked Collector can have an
// invalid Desc.
if desc.err != nil {
return desc.err
}
	dtoMetric := &dto.Metric{}
	if err := metric.Write(dtoMetric); err != nil {
		return fmt.Errorf("error collecting metric %v: %s", desc, err)
	}
	metricFamily, ok := metricFamiliesByName[desc.fqName]
	if ok { // Existing name.
		if metricFamily.GetHelp() != desc.help {
			return fmt.Errorf(
				"collected metric %s %s has help %q but should have %q",
				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
			)
		}
		// TODO(beorn7): Simplify switch once Desc has type.
		switch metricFamily.GetType() {
		case dto.MetricType_COUNTER:
			if dtoMetric.Counter == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Counter",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_GAUGE:
			if dtoMetric.Gauge == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Gauge",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_SUMMARY:
			if dtoMetric.Summary == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Summary",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_UNTYPED:
			if dtoMetric.Untyped == nil {
				return fmt.Errorf(
					"collected metric %s %s should be Untyped",
					desc.fqName, dtoMetric,
				)
			}
		case dto.MetricType_HISTOGRAM:
			if dtoMetric.Histogram == nil {
				return fmt.Errorf(
					"collected metric %s %s should be a Histogram",
					desc.fqName, dtoMetric,
				)
			}
		default:
			panic("encountered MetricFamily with invalid type")
		}
	} else { // New name.
		metricFamily = &dto.MetricFamily{}
		metricFamily.Name = proto.String(desc.fqName)
		metricFamily.Help = proto.String(desc.help)
@ -484,40 +659,36 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
		case dtoMetric.Histogram != nil:
			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
		default:
			return fmt.Errorf("empty metric collected: %s", dtoMetric)
		}
		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
			return err
		}
		metricFamiliesByName[desc.fqName] = metricFamily
	}
	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
		return err
	}
	if registeredDescIDs != nil {
		// Is the desc registered at all?
		if _, exist := registeredDescIDs[desc.id]; !exist {
			return fmt.Errorf(
				"collected metric %s %s with unregistered descriptor %s",
				metricFamily.GetName(), dtoMetric, desc,
			)
		}
		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
			return err
		}
	}
	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
	return nil
}
// Gatherers is a slice of Gatherer instances that implements the Gatherer
// interface itself. Its Gather method calls Gather on all Gatherers in the
// slice in order and returns the merged results. Errors returned from the
// Gather calls are all returned in a flattened MultiError. Duplicate and
// inconsistent Metrics are skipped (first occurrence in slice order wins) and
// reported in the returned error.
//
@ -537,7 +708,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
	var (
		metricFamiliesByName = map[string]*dto.MetricFamily{}
		metricHashes         = map[uint64]struct{}{}
		errs                 MultiError // The collected errors to return in the end.
	)
@ -574,10 +744,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
				existingMF.Name = mf.Name
				existingMF.Help = mf.Help
				existingMF.Type = mf.Type
				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
					errs = append(errs, err)
					continue
				}
				metricFamiliesByName[mf.GetName()] = existingMF
			}
			for _, m := range mf.Metric {
				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
					errs = append(errs, err)
					continue
				}
@ -585,88 +759,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
			}
		}
	}
	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
// Prometheus text format and the internal metric representation of the
// Prometheus server add while flattening Summaries and Histograms.
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
	var (
		newName              = mf.GetName()
		newType              = mf.GetType()
		newNameWithoutSuffix = ""
	)
	switch {
	case strings.HasSuffix(newName, "_count"):
		newNameWithoutSuffix = newName[:len(newName)-6]
	case strings.HasSuffix(newName, "_sum"):
		newNameWithoutSuffix = newName[:len(newName)-4]
	case strings.HasSuffix(newName, "_bucket"):
		newNameWithoutSuffix = newName[:len(newName)-7]
	}
	if newNameWithoutSuffix != "" {
		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
			switch existingMF.GetType() {
			case dto.MetricType_SUMMARY:
				if !strings.HasSuffix(newName, "_bucket") {
					return fmt.Errorf(
						"collected metric named %q collides with previously collected summary named %q",
						newName, newNameWithoutSuffix,
					)
				}
			case dto.MetricType_HISTOGRAM:
				return fmt.Errorf(
					"collected metric named %q collides with previously collected histogram named %q",
					newName, newNameWithoutSuffix,
				)
			}
		}
	}
	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
		if _, ok := mfs[newName+"_count"]; ok {
			return fmt.Errorf(
				"collected histogram or summary named %q collides with previously collected metric named %q",
				newName, newName+"_count",
			)
		}
		if _, ok := mfs[newName+"_sum"]; ok {
			return fmt.Errorf(
				"collected histogram or summary named %q collides with previously collected metric named %q",
				newName, newName+"_sum",
			)
		}
	}
	if newType == dto.MetricType_HISTOGRAM {
		if _, ok := mfs[newName+"_bucket"]; ok {
			return fmt.Errorf(
				"collected histogram named %q collides with previously collected metric named %q",
				newName, newName+"_bucket",
			)
		}
	}
	return nil
}
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes.
func checkMetricConsistency(
	metricFamily *dto.MetricFamily,
	dtoMetric *dto.Metric,
	metricHashes map[uint64]struct{},
) error {
	name := metricFamily.GetName()
	// Type consistency with metric family.
	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@ -674,41 +840,65 @@ func checkMetricConsistency(
		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
		return fmt.Errorf(
			"collected metric %q { %s} is not a %s",
			name, dtoMetric, metricFamily.GetType(),
		)
	}
	previousLabelName := ""
	for _, labelPair := range dtoMetric.GetLabel() {
		labelName := labelPair.GetName()
		if labelName == previousLabelName {
			return fmt.Errorf(
				"collected metric %q { %s} has two or more labels with the same name: %s",
				name, dtoMetric, labelName,
			)
		}
		if !checkLabelName(labelName) {
			return fmt.Errorf(
				"collected metric %q { %s} has a label with an invalid name: %s",
				name, dtoMetric, labelName,
			)
		}
		if dtoMetric.Summary != nil && labelName == quantileLabel {
			return fmt.Errorf(
				"collected metric %q { %s} must not have an explicit %q label",
				name, dtoMetric, quantileLabel,
			)
		}
		if !utf8.ValidString(labelPair.GetValue()) {
			return fmt.Errorf(
				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
				name, dtoMetric, labelName, labelPair.GetValue())
		}
		previousLabelName = labelName
	}
	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
	h := hashNew()
	h = hashAdd(h, name)
	h = hashAddByte(h, separatorByte)
	// Make sure label pairs are sorted. We depend on it for the consistency
	// check.
	if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
		copy(copiedLabels, dtoMetric.Label)
		sort.Sort(labelPairSorter(copiedLabels))
		dtoMetric.Label = copiedLabels
	}
	for _, lp := range dtoMetric.Label {
		h = hashAdd(h, lp.GetName())
		h = hashAddByte(h, separatorByte)
		h = hashAdd(h, lp.GetValue())
		h = hashAddByte(h, separatorByte)
	}
	if _, exists := metricHashes[h]; exists {
		return fmt.Errorf(
			"collected metric %q { %s} was collected before with the same name and label values",
			name, dtoMetric,
		)
	}
	metricHashes[h] = struct{}{}
	return nil
}
@ -727,8 +917,8 @@ func checkDescConsistency(
	}
	// Is the desc consistent with the content of the metric?
	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
	copy(lpsFromDesc, desc.constLabelPairs)
	for _, l := range desc.variableLabels {
		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
			Name: proto.String(l),
@ -740,7 +930,7 @@ func checkDescConsistency(
			metricFamily.GetName(), dtoMetric, desc,
		)
	}
	sort.Sort(labelPairSorter(lpsFromDesc))
	for i, lpFromDesc := range lpsFromDesc {
		lpFromMetric := dtoMetric.Label[i]
		if lpFromDesc.GetName() != lpFromMetric.GetName() ||

View file

@ -16,8 +16,10 @@ package prometheus
import (
	"fmt"
	"math"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"github.com/beorn7/perks/quantile"
@ -36,7 +38,10 @@ const quantileLabel = "quantile"
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
// as rank estimations. However, the default behavior will change in the
// upcoming v1.0.0 of the library. There will be no rank estimations at all by
// default. For a sane transition, it is recommended to set the desired rank
// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
@ -53,17 +58,9 @@ type Summary interface {
	Observe(float64)
}

var errQuantileLabelNotAllowed = fmt.Errorf(
	"%q is not allowed as label name in summaries", quantileLabel,
)
// Default values for SummaryOpts.
const (
@ -78,8 +75,10 @@
)
// SummaryOpts bundles the options for creating a Summary metric. It is
// mandatory to set Name to a non-empty string. While all other fields are
// optional and can safely be left at their zero value, it is recommended to set
// a help string and to explicitly set the Objectives field to the desired value
// as the default value will change in the upcoming v1.0.0 of the library.
type SummaryOpts struct {
	// Namespace, Subsystem, and Name are components of the fully-qualified
	// name of the Summary (created by joining these components with
@ -90,41 +89,34 @@ type SummaryOpts struct {
	Subsystem string
	Name      string

	// Help provides information about this Summary.
	//
	// Metrics with the same fully-qualified name must have the same Help
	// string.
	Help string

	// ConstLabels are used to attach fixed labels to this metric. Metrics
	// with the same fully-qualified name must have the same label names in
	// their ConstLabels.
	//
	// Due to the way a Summary is represented in the Prometheus text format
	// and how it is handled by the Prometheus server internally, “quantile”
	// is an illegal label name. Construction of a Summary or SummaryVec
	// will panic if this label name is used in ConstLabels.
	//
	// ConstLabels are only used rarely. In particular, do not use them to
	// attach the same labels to all your metrics. Those use cases are
	// better covered by target labels set by the scraping Prometheus
	// server, or by one specific metric (e.g. a build_info or a
	// machine_role metric). See also
	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
	ConstLabels Labels

	// Objectives defines the quantile rank estimates with their respective
	// absolute error. If Objectives[q] = e, then the value reported for q
	// will be the φ-quantile value for some φ between q-e and q+e. The
	// default value is an empty map, resulting in a summary without
	// quantiles.
	Objectives map[float64]float64
	// MaxAge defines the duration for which an observation stays relevant
@ -148,7 +140,7 @@ type SummaryOpts struct {
	BufCap uint32
}
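
Since the default Objectives are now empty, code that wants the historic default quantiles has to state them explicitly. An illustrative sketch (the exampleSummaryWithObjectives helper and metric name are not part of this file):

func exampleSummaryWithObjectives() Summary {
	return NewSummary(SummaryOpts{
		Name: "rpc_durations_seconds",
		Help: "RPC latency distributions.",
		// The former DefObjectives default, now spelled out explicitly:
		// quantile -> allowed absolute error.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
}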
// Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging
// summaries in the first place. To avoid using Merge, we are currently adding
@ -178,7 +170,7 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	if len(desc.variableLabels) != len(labelValues) {
		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
	}
for _, n := range desc.variableLabels { for _, n := range desc.variableLabels {
@ -193,7 +185,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
	}
	if opts.Objectives == nil {
		opts.Objectives = map[float64]float64{}
	}
if opts.MaxAge < 0 { if opts.MaxAge < 0 {
@ -211,6 +203,17 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
desc: desc,
labelPairs: makeLabelPairs(desc, labelValues),
counts: [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
}
s.init(s) // Init self-collection.
return s
}
s := &summary{
desc: desc,
@ -379,6 +382,116 @@ func (s *summary) swapBufs(now time.Time) {
}
}
type summaryCounts struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
}
type noObjectivesSummary struct {
// countAndHotIdx enables lock-free writes with use of atomic updates.
// The most significant bit is the hot index [0 or 1] of the count field
// below. Observe calls update the hot one. All remaining bits count the
// number of Observe calls. Observe starts by incrementing this counter,
// and finishes by incrementing the count field in the respective
// summaryCounts, as a marker for completion.
//
// Calls of the Write method (which are non-mutating reads from the
// perspective of the summary) swap the hot and cold counts under the
// writeMtx lock. A cooldown is awaited (while locked) by comparing the
// number of observations with the initiation count. Once they match, the
// last observation on the now cool counts has completed. All cold fields
// must then be merged into the new hot counts before writeMtx is released.
// Fields with atomic access first! See alignment constraint:
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
selfCollector
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64-bit alignment of the summaryCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
}
func (s *noObjectivesSummary) Desc() *Desc {
return s.desc
}
func (s *noObjectivesSummary) Observe(v float64) {
// We increment s.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&s.countAndHotIdx, 1)
hotCounts := s.counts[n>>63]
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
func (s *noObjectivesSummary) Write(out *dto.Metric) error {
// For simplicity, we protect this whole method by a mutex. It is not in
// the hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it, if possible at
// all.
s.writeMtx.Lock()
defer s.writeMtx.Unlock()
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// without touching the count bits. See the struct comments for a full
// description of the algorithm.
n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
// count is contained unchanged in the lower 63 bits.
count := n & ((1 << 63) - 1)
// The most significant bit tells us which counts is hot. The complement
// is thus the cold one.
hotCounts := s.counts[n>>63]
coldCounts := s.counts[(^n)>>63]
// Await cooldown.
for count != atomic.LoadUint64(&coldCounts.count) {
runtime.Gosched() // Let observations get work done.
}
sum := &dto.Summary{
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
}
out.Summary = sum
out.Label = s.labelPairs
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
atomic.StoreUint64(&coldCounts.sumBits, 0)
break
}
}
return nil
}
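A short usage sketch of this lock-free path (metric name invented): with an empty Objectives map the summary tracks only count and sum, and Write triggers the hot/cold swap described above.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	s := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "job_duration_seconds",
		Help:       "Duration of jobs.",
		Objectives: map[float64]float64{}, // no quantiles, lock-free implementation
	})
	s.Observe(1.0)
	s.Observe(2.5)

	var m dto.Metric
	if err := s.Write(&m); err != nil { // performs the hot/cold swap
		panic(err)
	}
	fmt.Println(m.Summary.GetSampleCount(), m.Summary.GetSampleSum()) // 2 3.5
}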
type quantSort []*dto.Quantile

func (s quantSort) Len() int {
@ -399,13 +512,21 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
*metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
// partitioned by the given label names.
//
// Due to the way a Summary is represented in the Prometheus text format and how
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
for _, ln := range labelNames {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
@ -413,47 +534,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
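A hedged usage sketch (metric and label names invented); note that including “quantile” among the label names would make this panic:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Partition request latencies by handler and method. The label name
	// "quantile" is reserved and would make NewSummaryVec panic.
	latencies := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "Request latencies.",
	}, []string{"handler", "method"})
	prometheus.MustRegister(latencies)
	latencies.WithLabelValues("/api", "GET").Observe(0.021)
}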
// GetMetricWithLabelValues returns the Summary for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Summary is created.
//
// It is possible to call this method without using the returned Summary to only
// create the new Summary but leave it at its starting value, a Summary without
// any observations.
//
// Keeping the Summary for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Summary from the SummaryVec. In that case,
// the Summary will still exist, but it will not be exported anymore, even if a
// Summary with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// GetMetricWith returns the Summary for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Summary is created. Implications of
// creating a Summary without using it and keeping the Summary for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
s, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return s
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *SummaryVec) With(labels Labels) Observer {
s, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return s
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence, which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the SummaryVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.curryWith(labels)
if vec != nil {
return &SummaryVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
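A sketch of currying in practice, with invented metric and label names: one label is fixed up front and the curried view carries only the remaining labels, in their original order.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	requests := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "Request latencies.",
	}, []string{"instance", "handler", "method"})
	prometheus.MustRegister(requests)

	// Pre-set the "instance" label once; the curried view only needs the
	// two remaining labels.
	byHandler := requests.MustCurryWith(prometheus.Labels{"instance": "host-1"})
	byHandler.WithLabelValues("/api", "GET").Observe(0.013)
}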
type constSummary struct {
@ -506,7 +696,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstSummary(
desc *Desc,
count uint64,
@ -514,8 +704,11 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constSummary{
desc: desc,

View file

@ -15,32 +15,6 @@ package prometheus
import "time" import "time"
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
@ -65,10 +39,16 @@ func NewTimer(o Observer) *Timer {
// ObserveDuration records the duration passed since the Timer was created with
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. The observed
// duration is also returned. ObserveDuration is usually called with a defer
// statement.
//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func (t *Timer) ObserveDuration() time.Duration {
d := time.Since(t.begin)
if t.observer != nil {
t.observer.Observe(d.Seconds())
}
return d
}
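The returned duration makes it easy to both export and log one measurement. A minimal sketch, with invented metric names:

package main

import (
	"log"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requestDuration := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Time spent handling a request.",
	})
	prometheus.MustRegister(requestDuration)

	timer := prometheus.NewTimer(requestDuration)
	time.Sleep(10 * time.Millisecond) // stand-in for real work
	d := timer.ObserveDuration()      // observes and returns the duration
	log.Printf("request took %v", d)
}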


@ -13,113 +13,12 @@
package prometheus
// Untyped is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// An Untyped metric works the same as a Gauge. The only difference is that
// no type information is implied.
//
// To create Untyped instances, use NewUntyped.
//
// Deprecated: The Untyped type is deprecated because it doesn't make sense in
// direct instrumentation. If you need to mirror an external metric of unknown
// type (usually while writing exporters), Use MustNewConstMetric to create an
// untyped metric instance on the fly.
type Untyped interface {
Metric
Collector
// Set sets the Untyped metric to an arbitrary value.
Set(float64)
// Inc increments the Untyped metric by 1.
Inc()
// Dec decrements the Untyped metric by 1.
Dec()
// Add adds the given value to the Untyped metric. (The value can be
// negative, resulting in a decrease.)
Add(float64)
// Sub subtracts the given value from the Untyped metric. (The value can
// be negative, resulting in an increase.)
Sub(float64)
}
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
func NewUntyped(opts UntypedOpts) Untyped {
return newValue(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), UntypedValue, 0)
}
// UntypedVec is a Collector that bundles a set of Untyped metrics that all
// share the same Desc, but have different values for their variable
// labels. This is used if you want to count the same thing partitioned by
// various dimensions. Create instances with NewUntypedVec.
type UntypedVec struct {
*MetricVec
}
// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
// partitioned by the given label names. At least one label name must be
// provided.
func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &UntypedVec{
MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, UntypedValue, 0, lvs...)
}),
}
}
// GetMetricWithLabelValues replaces the method of the same name in
// MetricVec. The difference is that this method returns an Untyped and not a
// Metric so that no type conversion is required.
func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Untyped), err
}
return nil, err
}
// GetMetricWith replaces the method of the same name in MetricVec. The
// difference is that this method returns an Untyped and not a Metric so that no
// type conversion is required.
func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
metric, err := m.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Untyped), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
return m.MetricVec.WithLabelValues(lvs...).(Untyped)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *UntypedVec) With(labels Labels) Untyped {
return m.MetricVec.With(labels).(Untyped)
}
// UntypedFunc works like GaugeFunc but the collected metric is of type
// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
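A hedged sketch of mirroring an external value with NewUntypedFunc (the value source is invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Expose a value read from some external system at collect time.
	// readExternalValue stands in for whatever lookup is really needed.
	readExternalValue := func() float64 { return 42 }

	mirror := prometheus.NewUntypedFunc(prometheus.UntypedOpts{
		Name: "external_system_value",
		Help: "Value mirrored from an external system.",
	}, readExternalValue)
	prometheus.MustRegister(mirror)
}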


@ -14,16 +14,12 @@
package prometheus

import (
"errors"
"fmt" "fmt"
"math"
"sort" "sort"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// ValueType is an enumeration of metric types that represent a simple value.
@ -37,81 +33,6 @@ const (
UntypedValue
)
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
// value is a generic metric for simple values. It implements Metric, Collector,
// Counter, Gauge, and Untyped. Its effective type is determined by
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
// valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
selfCollector
desc *Desc
valType ValueType
labelPairs []*dto.LabelPair
}
// newValue returns a newly allocated value with the given Desc, ValueType,
// sample value and label values. It panics if the number of label
// values is different from the number of variable labels in Desc.
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
if len(labelValues) != len(desc.variableLabels) {
panic(errInconsistentCardinality)
}
result := &value{
desc: desc,
valType: valueType,
valBits: math.Float64bits(val),
labelPairs: makeLabelPairs(desc, labelValues),
}
result.init(result)
return result
}
func (v *value) Desc() *Desc {
return v.desc
}
func (v *value) Set(val float64) {
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
}
func (v *value) SetToCurrentTime() {
v.Set(float64(time.Now().UnixNano()) / 1e9)
}
func (v *value) Inc() {
v.Add(1)
}
func (v *value) Dec() {
v.Add(-1)
}
func (v *value) Add(val float64) {
for {
oldBits := atomic.LoadUint64(&v.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
return
}
}
}
func (v *value) Sub(val float64) {
v.Add(val * -1)
}
func (v *value) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
return populateMetric(v.valType, val, v.labelPairs, out)
}
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@ -156,10 +77,14 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// operations. However, when implementing custom Collectors, it is useful as a
// throw-away metric that is generated on the fly to send it to Prometheus in
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc or if Desc is
// invalid.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
@ -231,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
Value: proto.String(labelValues[i]),
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
sort.Sort(labelPairSorter(labelPairs))
return labelPairs
}


@ -20,33 +20,180 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
// metricVec is a Collector to bundle metrics of the same name that differ in
// their label values. metricVec is not used directly (and therefore
// unexported). It is used as a building block for implementations of vectors of
// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
// It also handles label currying. It uses metricMap internally.
type metricVec struct {
*metricMap

curry []curriedLabelValue

// hashAdd and hashAddByte can be replaced for testing collision handling.
hashAdd func(h uint64, s string) uint64
hashAddByte func(h uint64, b byte) uint64
}
// newMetricVec returns an initialized metricVec.
func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
return &metricVec{
metricMap: &metricMap{
metrics: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
},
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
}
return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc. However, such inconsistent Labels
// can never match an actual metric, so the method will always return false in
// that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *metricVec) Delete(labels Labels) bool {
h, err := m.hashLabels(labels)
if err != nil {
return false
}
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
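Both deletion methods are promoted through the exported vector types. A sketch with invented names, using a GaugeVec:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	temps := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "pool_temperature_celsius",
		Help: "Temperature per pool.",
	}, []string{"pool"})
	temps.WithLabelValues("alpha").Set(31.2)

	// Two equivalent ways to address the same child metric.
	fmt.Println(temps.DeleteLabelValues("alpha"))                 // true: deleted
	fmt.Println(temps.Delete(prometheus.Labels{"pool": "alpha"})) // false: already gone
}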
func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
var (
newCurry []curriedLabelValue
oldCurry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
return nil, fmt.Errorf("label name %q is already curried", label)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
} else {
if !ok {
continue // Label stays uncurried.
}
newCurry = append(newCurry, curriedLabelValue{i, val})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
}
return &metricVec{
metricMap: m.metricMap,
curry: newCurry,
hashAdd: m.hashAdd,
hashAddByte: m.hashAddByte,
}, nil
}
func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
var (
h = hashNew()
curry = m.curry
iVals, iCurry int
)
for i := 0; i < len(m.desc.variableLabels); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
h = m.hashAdd(h, vals[iVals])
iVals++
}
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
return 0, err
}
var (
h = hashNew()
curry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
return 0, fmt.Errorf("label name %q is already curried", label)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
h = m.hashAdd(h, val)
}
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
// metricWithLabelValues provides the metric and its label values for
// disambiguation on hash collision.
type metricWithLabelValues struct {
@ -54,166 +201,72 @@ type metricWithLabelValues struct {
metric Metric
}
// curriedLabelValue sets the curried value for a label at the given index.
type curriedLabelValue struct {
index int
value string
}
// metricMap is a helper for metricVec and shared between differently curried
// metricVecs.
type metricMap struct {
mtx sync.RWMutex // Protects metrics.
metrics map[uint64][]metricWithLabelValues
desc *Desc
newMetric func(labelValues ...string) Metric
}
// Describe implements Collector. It will send exactly one Desc to the provided
// channel.
func (m *metricMap) Describe(ch chan<- *Desc) {
ch <- m.desc
}
// Collect implements Collector.
func (m *metricMap) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
for _, metrics := range m.metrics {
for _, metric := range metrics {
ch <- metric.metric
}
}
}

// Reset deletes all metrics in this vector.
func (m *metricMap) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
for h := range m.metrics {
delete(m.metrics, h)
}
}

// GetMetricWithLabelValues returns the Metric for the given slice of label
// values (same order as the VariableLabels in Desc). If that combination of
// label values is accessed for the first time, a new Metric is created.
//
// It is possible to call this method without using the returned Metric to only
// create the new Metric but leave it at its start value (e.g. a Summary or
// Histogram without any observations). See also the SummaryVec example.
//
// Keeping the Metric for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Metric from the MetricVec. In that case, the
// Metric will still exist, but it will not be exported anymore, even if a
// Metric with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of VariableLabels in Desc.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
// must match those of the VariableLabels in Desc). If that label map is
// accessed for the first time, a new Metric is created. Implications of
// creating a Metric without using it and keeping the Metric for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the VariableLabels in Desc.
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
return m.getOrCreateMetricWithLabels(h, labels), nil
}
// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
// occurs. The method allows neat syntax like:
// httpReqs.WithLabelValues("404", "POST").Inc()
func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
metric, err := m.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return metric
}
// With works as GetMetricWith, but panics if an error occurs. The method allows
// neat syntax like:
// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
func (m *MetricVec) With(labels Labels) Metric {
metric, err := m.GetMetricWith(labels)
if err != nil {
panic(err)
}
return metric
}
// DeleteLabelValues removes the metric where the variable labels are the same
// as those passed in as labels (same order as the VariableLabels in Desc). It
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
// number of VariableLabels in Desc. However, such inconsistent label count can
// never match an actual Metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider Delete(Labels) as an
// alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
}
return m.deleteByHashWithLabelValues(h, lvs)
}
// Delete deletes the metric where the variable labels are the same as those
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
// with those of the VariableLabels in the Desc of the MetricVec. However, such
// inconsistent Labels can never match an actual Metric, so the method will
// always return false in that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
h, err := m.hashLabels(labels)
if err != nil {
return false
}
return m.deleteByHashWithLabels(h, labels)
} }
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
func (m *metricMap) deleteByHashWithLabelValues(
h uint64, lvs []string, curry []curriedLabelValue,
) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
metrics, ok := m.metrics[h]
if !ok {
return false
}
i := findMetricWithLabelValues(metrics, lvs, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.metrics, h)
}
return true
}
@ -221,69 +274,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
func (m *metricMap) deleteByHashWithLabels(
h uint64, labels Labels, curry []curriedLabelValue,
) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
metrics, ok := m.metrics[h]
if !ok {
return false
}
i := findMetricWithLabels(m.desc, metrics, labels, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
delete(m.metrics, h)
}
return true
}
// Reset deletes all metrics in this vector.
func (m *MetricVec) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
for h := range m.children {
delete(m.children, h)
}
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if len(vals) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
h := hashNew()
for _, val := range vals {
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if len(labels) != len(m.desc.variableLabels) {
return 0, errInconsistentCardinality
}
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label)
}
h = m.hashAdd(h, val)
h = m.hashAddByte(h, model.SeparatorByte)
}
return h, nil
}
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabelValues(
hash uint64, lvs []string, curry []curriedLabelValue,
) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
m.mtx.RUnlock()
if ok {
return metric
@ -291,13 +313,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
if !ok {
inlinedLVs := inlineLabelValues(lvs, curry)
metric = m.newMetric(inlinedLVs...)
m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
}
return metric
}
@ -306,9 +326,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
// or creates it and returns the new one.
//
// This function holds the mutex.
func (m *metricMap) getOrCreateMetricWithLabels(
hash uint64, labels Labels, curry []curriedLabelValue,
) Metric {
m.mtx.RLock()
metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
m.mtx.RUnlock()
if ok {
return metric
@ -316,33 +338,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
m.mtx.Lock()
defer m.mtx.Unlock()
metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
if !ok {
lvs := extractLabelValues(m.desc, labels, curry)
metric = m.newMetric(lvs...)
m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
// getMetricWithHashAndLabelValues gets a metric while handling possible
// collisions in the hash space. Must be called while holding the read mutex.
func (m *metricMap) getMetricWithHashAndLabelValues(
h uint64, lvs []string, curry []curriedLabelValue,
) (Metric, bool) {
metrics, ok := m.metrics[h]
if ok {
if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
func (m *metricMap) getMetricWithHashAndLabels(
h uint64, labels Labels, curry []curriedLabelValue,
) (Metric, bool) {
metrics, ok := m.metrics[h]
if ok {
if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
@ -351,9 +377,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
func findMetricWithLabelValues(
metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
) int {
for i, metric := range metrics {
if matchLabelValues(metric.values, lvs, curry) {
return i
}
}
@ -362,32 +390,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
func findMetricWithLabels(
desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
) int {
for i, metric := range metrics {
if matchLabels(desc, metric.values, labels, curry) {
return i
}
}
return len(metrics)
}
func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}
var iLVs, iCurry int
for i, v := range values {
if iCurry < len(curry) && curry[iCurry].index == i {
if v != curry[iCurry].value {
return false
}
iCurry++
continue
}
if v != lvs[iLVs] {
return false
}
iLVs++
}
return true
}
func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
if len(values) != len(labels)+len(curry) {
return false
}
iCurry := 0
for i, k := range desc.variableLabels {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
}
iCurry++
continue
}
if values[i] != labels[k] {
return false
}
@ -395,10 +442,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
return true
}
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
for i, k := range desc.variableLabels {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
continue
}
labelValues[i] = labels[k]
}
return labelValues
}
func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
labelValues := make([]string, len(lvs)+len(curry))
var iCurry, iLVs int
for i := range labelValues {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
continue
}
labelValues[i] = lvs[iLVs]
iLVs++
}
return labelValues
}


@ -0,0 +1,200 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"sort"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// WrapRegistererWith returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
//
// WrapRegistererWith provides a way to add fixed labels to a subset of
// Collectors. It should not be used to add fixed labels to all metrics exposed.
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
// detected. Any AlreadyRegisteredError returned by the Register method of
// either Registerer will contain the ExistingCollector in the form it was
// provided to the respective registry.
//
// The Collector example demonstrates a use of WrapRegistererWith.
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
return &wrappingRegisterer{
wrappedRegisterer: reg,
labels: labels,
}
}
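A sketch of labeling one subsystem's collectors through a wrapped registry (label and metric names invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through wrapped gets component="worker" as a
	// const label; collectors registered directly with reg are unaffected.
	wrapped := prometheus.WrapRegistererWith(
		prometheus.Labels{"component": "worker"}, reg)

	jobs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total",
		Help: "Jobs processed by the worker.",
	})
	wrapped.MustRegister(jobs)
}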
// WrapRegistererWithPrefix returns a Registerer wrapping the provided
// Registerer. Collectors registered with the returned Registerer will be
// registered with the wrapped Registerer in a modified way. The modified
// Collector adds the provided prefix to the name of all Metrics it collects.
//
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
// a sub-system. To make this work, register metrics of the sub-system with the
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
// to use the same prefix for all metrics exposed. In particular, do not prefix
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
// fact, those metrics are already prefixed with “go_” or “process_”,
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
// Collectors registered through the wrapping Registerer will still be
// detected. Any AlreadyRegisteredError returned by the Register method of
// either Registerer will contain the ExistingCollector in the form it was
// provided to the respective registry.
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
return &wrappingRegisterer{
wrappedRegisterer: reg,
prefix: prefix,
}
}
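And a corresponding sketch for prefixing a subsystem's metrics (the prefix is invented):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Metrics registered through sub are exposed as mycompany_subsys_<name>.
	sub := prometheus.WrapRegistererWithPrefix("mycompany_subsys_", reg)
	sub.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
		Name: "events_total",
		Help: "Events seen by the sub-system.",
	}))
}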
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
labels Labels
}
func (r *wrappingRegisterer) Register(c Collector) error {
return r.wrappedRegisterer.Register(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
labels: r.labels,
})
}
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
for _, c := range cs {
if err := r.Register(c); err != nil {
panic(err)
}
}
}
func (r *wrappingRegisterer) Unregister(c Collector) bool {
return r.wrappedRegisterer.Unregister(&wrappingCollector{
wrappedCollector: c,
prefix: r.prefix,
labels: r.labels,
})
}
type wrappingCollector struct {
wrappedCollector Collector
prefix string
labels Labels
}
func (c *wrappingCollector) Collect(ch chan<- Metric) {
wrappedCh := make(chan Metric)
go func() {
c.wrappedCollector.Collect(wrappedCh)
close(wrappedCh)
}()
for m := range wrappedCh {
ch <- &wrappingMetric{
wrappedMetric: m,
prefix: c.prefix,
labels: c.labels,
}
}
}
func (c *wrappingCollector) Describe(ch chan<- *Desc) {
wrappedCh := make(chan *Desc)
go func() {
c.wrappedCollector.Describe(wrappedCh)
close(wrappedCh)
}()
for desc := range wrappedCh {
ch <- wrapDesc(desc, c.prefix, c.labels)
}
}
func (c *wrappingCollector) unwrapRecursively() Collector {
switch wc := c.wrappedCollector.(type) {
case *wrappingCollector:
return wc.unwrapRecursively()
default:
return wc
}
}
type wrappingMetric struct {
wrappedMetric Metric
prefix string
labels Labels
}
func (m *wrappingMetric) Desc() *Desc {
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
}
func (m *wrappingMetric) Write(out *dto.Metric) error {
if err := m.wrappedMetric.Write(out); err != nil {
return err
}
if len(m.labels) == 0 {
// No wrapping labels.
return nil
}
for ln, lv := range m.labels {
out.Label = append(out.Label, &dto.LabelPair{
Name: proto.String(ln),
Value: proto.String(lv),
})
}
sort.Sort(labelPairSorter(out.Label))
return nil
}
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels := Labels{}
for _, lp := range desc.constLabelPairs {
constLabels[*lp.Name] = *lp.Value
}
for ln, lv := range labels {
if _, alreadyUsed := constLabels[ln]; alreadyUsed {
return &Desc{
fqName: desc.fqName,
help: desc.help,
variableLabels: desc.variableLabels,
constLabelPairs: desc.constLabelPairs,
err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
}
}
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
newDesc.err = desc.err
}
return newDesc
}


@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error {
}
// ExtractSamples builds a slice of samples from the provided metric
// families. If an error occurs during sample extraction, it continues to
// extract from the remaining metric families. The returned error is the last
// error that has occurred.
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
var (
all model.Vector


@ -26,7 +26,7 @@ const (
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`


@ -14,13 +14,45 @@
package expfmt

import (
"bytes"
"fmt" "fmt"
"io" "io"
"math" "math"
"strconv"
"strings" "strings"
"sync"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model" )
// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
// implements it.
type enhancedWriter interface {
io.Writer
WriteRune(r rune) (n int, err error)
WriteString(s string) (n int, err error)
WriteByte(c byte) error
}
const (
initialBufSize = 512
initialNumBufSize = 24
)
var (
bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(make([]byte, 0, initialBufSize))
},
}
numBufPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 0, initialNumBufSize)
return &b
},
}
) )
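The pools above let the encoder reuse scratch buffers instead of allocating on every call. A self-contained sketch of the same sync.Pool pattern, using only the standard library:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var scratchPool = sync.Pool{
	New: func() interface{} {
		// Pre-size the buffer so typical writes need no growth.
		return bytes.NewBuffer(make([]byte, 0, 512))
	},
}

func render(name string) string {
	b := scratchPool.Get().(*bytes.Buffer)
	b.Reset()                // a pooled buffer may hold old content
	defer scratchPool.Put(b) // return it for reuse once we are done
	fmt.Fprintf(b, "# HELP %s ...\n", name)
	return b.String()
}

func main() {
	fmt.Print(render("my_metric"))
}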
// MetricFamilyToText converts a MetricFamily proto message into text format and
@ -32,37 +64,92 @@ import (
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks. // Fail-fast checks.
if len(in.Metric) == 0 { if len(in.Metric) == 0 {
return written, fmt.Errorf("MetricFamily has no metrics: %s", in) return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
} }
name := in.GetName() name := in.GetName()
if name == "" { if name == "" {
return written, fmt.Errorf("MetricFamily has no name: %s", in) return 0, fmt.Errorf("MetricFamily has no name: %s", in)
} }
// Try the interface upgrade. If it doesn't work, we'll use a
// bytes.Buffer from the sync.Pool and write out its content to out in a
// single go in the end.
w, ok := out.(enhancedWriter)
if !ok {
b := bufPool.Get().(*bytes.Buffer)
b.Reset()
w = b
defer func() {
bWritten, bErr := out.Write(b.Bytes())
written = bWritten
if err == nil {
err = bErr
}
bufPool.Put(b)
}()
}
var n int
// Comments, first HELP, then TYPE. // Comments, first HELP, then TYPE.
if in.Help != nil { if in.Help != nil {
n, err := fmt.Fprintf( n, err = w.WriteString("# HELP ")
out, "# HELP %s %s\n",
name, escapeString(*in.Help, false),
)
written += n written += n
if err != nil { if err != nil {
return written, err return
} }
n, err = w.WriteString(name)
written += n
if err != nil {
return
}
err = w.WriteByte(' ')
written++
if err != nil {
return
}
n, err = writeEscapedString(w, *in.Help, false)
written += n
if err != nil {
return
}
err = w.WriteByte('\n')
written++
if err != nil {
return
}
}
n, err = w.WriteString("# TYPE ")
written += n
if err != nil {
return
}
n, err = w.WriteString(name)
written += n
if err != nil {
return
} }
metricType := in.GetType() metricType := in.GetType()
n, err := fmt.Fprintf( switch metricType {
out, "# TYPE %s %s\n", case dto.MetricType_COUNTER:
name, strings.ToLower(metricType.String()), n, err = w.WriteString(" counter\n")
) case dto.MetricType_GAUGE:
n, err = w.WriteString(" gauge\n")
case dto.MetricType_SUMMARY:
n, err = w.WriteString(" summary\n")
case dto.MetricType_UNTYPED:
n, err = w.WriteString(" untyped\n")
case dto.MetricType_HISTOGRAM:
n, err = w.WriteString(" histogram\n")
default:
return written, fmt.Errorf("unknown metric type %s", metricType.String())
}
written += n written += n
if err != nil { if err != nil {
return written, err return
} }
// Finally the samples, one line for each. // Finally the samples, one line for each.
@ -75,9 +162,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
) )
} }
n, err = writeSample( n, err = writeSample(
name, metric, "", "", w, name, "", metric, "", 0,
metric.Counter.GetValue(), metric.Counter.GetValue(),
out,
) )
case dto.MetricType_GAUGE: case dto.MetricType_GAUGE:
if metric.Gauge == nil { if metric.Gauge == nil {
@ -86,9 +172,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
) )
} }
n, err = writeSample( n, err = writeSample(
name, metric, "", "", w, name, "", metric, "", 0,
metric.Gauge.GetValue(), metric.Gauge.GetValue(),
out,
) )
case dto.MetricType_UNTYPED: case dto.MetricType_UNTYPED:
if metric.Untyped == nil { if metric.Untyped == nil {
@ -97,9 +182,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
) )
} }
n, err = writeSample( n, err = writeSample(
name, metric, "", "", w, name, "", metric, "", 0,
metric.Untyped.GetValue(), metric.Untyped.GetValue(),
out,
) )
case dto.MetricType_SUMMARY: case dto.MetricType_SUMMARY:
if metric.Summary == nil { if metric.Summary == nil {
@ -109,29 +193,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
} }
for _, q := range metric.Summary.Quantile { for _, q := range metric.Summary.Quantile {
n, err = writeSample( n, err = writeSample(
name, metric, w, name, "", metric,
model.QuantileLabel, fmt.Sprint(q.GetQuantile()), model.QuantileLabel, q.GetQuantile(),
q.GetValue(), q.GetValue(),
out,
) )
written += n written += n
if err != nil { if err != nil {
return written, err return
} }
} }
n, err = writeSample( n, err = writeSample(
name+"_sum", metric, "", "", w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), metric.Summary.GetSampleSum(),
out,
) )
if err != nil {
return written, err
}
written += n written += n
if err != nil {
return
}
n, err = writeSample( n, err = writeSample(
name+"_count", metric, "", "", w, name, "_count", metric, "", 0,
float64(metric.Summary.GetSampleCount()), float64(metric.Summary.GetSampleCount()),
out,
) )
case dto.MetricType_HISTOGRAM: case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil { if metric.Histogram == nil {
@ -140,46 +221,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
) )
} }
infSeen := false infSeen := false
for _, q := range metric.Histogram.Bucket { for _, b := range metric.Histogram.Bucket {
n, err = writeSample( n, err = writeSample(
name+"_bucket", metric, w, name, "_bucket", metric,
model.BucketLabel, fmt.Sprint(q.GetUpperBound()), model.BucketLabel, b.GetUpperBound(),
float64(q.GetCumulativeCount()), float64(b.GetCumulativeCount()),
out,
) )
written += n written += n
if err != nil { if err != nil {
return written, err return
} }
if math.IsInf(q.GetUpperBound(), +1) { if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true infSeen = true
} }
} }
if !infSeen { if !infSeen {
n, err = writeSample( n, err = writeSample(
name+"_bucket", metric, w, name, "_bucket", metric,
model.BucketLabel, "+Inf", model.BucketLabel, math.Inf(+1),
float64(metric.Histogram.GetSampleCount()), float64(metric.Histogram.GetSampleCount()),
out,
) )
if err != nil {
return written, err
}
written += n written += n
if err != nil {
return
}
} }
n, err = writeSample( n, err = writeSample(
name+"_sum", metric, "", "", w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), metric.Histogram.GetSampleSum(),
out,
) )
if err != nil {
return written, err
}
written += n written += n
if err != nil {
return
}
n, err = writeSample( n, err = writeSample(
name+"_count", metric, "", "", w, name, "_count", metric, "", 0,
float64(metric.Histogram.GetSampleCount()), float64(metric.Histogram.GetSampleCount()),
out,
) )
default: default:
return written, fmt.Errorf( return written, fmt.Errorf(
@ -188,116 +265,204 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
} }
written += n written += n
if err != nil { if err != nil {
return written, err return
} }
} }
return written, nil return
} }
// writeSample writes a single sample in text format to out, given the metric // writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name // name, the metric proto message itself, optionally an additional label name
// and value (use empty strings if not required), and the value. The function // with a float64 value (use empty string as label name if not required), and
// returns the number of bytes written and any error encountered. // the value. The function returns the number of bytes written and any error
// encountered.
func writeSample( func writeSample(
name string, w enhancedWriter,
name, suffix string,
metric *dto.Metric, metric *dto.Metric,
additionalLabelName, additionalLabelValue string, additionalLabelName string, additionalLabelValue float64,
value float64, value float64,
out io.Writer,
) (int, error) { ) (int, error) {
var written int var written int
n, err := fmt.Fprint(out, name) n, err := w.WriteString(name)
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
n, err = labelPairsToText( if suffix != "" {
metric.Label, n, err = w.WriteString(suffix)
additionalLabelName, additionalLabelValue, written += n
out, if err != nil {
return written, err
}
}
n, err = writeLabelPairs(
w, metric.Label, additionalLabelName, additionalLabelValue,
) )
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
n, err = fmt.Fprintf(out, " %v", value) err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
n, err = writeFloat(w, value)
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
if metric.TimestampMs != nil { if metric.TimestampMs != nil {
n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) err = w.WriteByte(' ')
written++
if err != nil {
return written, err
}
n, err = writeInt(w, *metric.TimestampMs)
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
} }
n, err = out.Write([]byte{'\n'}) err = w.WriteByte('\n')
written += n written++
if err != nil { if err != nil {
return written, err return written, err
} }
return written, nil return written, nil
} }
// labelPairsToText converts a slice of LabelPair proto messages plus the // writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the // explicitly given additional label pair into text formatted as required by the
// text format and writes it to 'out'. An empty slice in combination with an // text format and writes it to 'w'. An empty slice in combination with an empty
// empty string 'additionalLabelName' results in nothing being // string 'additionalLabelName' results in nothing being written. Otherwise, the
// written. Otherwise, the label pairs are written, escaped as required by the // label pairs are written, escaped as required by the text format, and enclosed
// text format, and enclosed in '{...}'. The function returns the number of // in '{...}'. The function returns the number of bytes written and any error
// bytes written and any error encountered. // encountered.
func labelPairsToText( func writeLabelPairs(
w enhancedWriter,
in []*dto.LabelPair, in []*dto.LabelPair,
additionalLabelName, additionalLabelValue string, additionalLabelName string, additionalLabelValue float64,
out io.Writer,
) (int, error) { ) (int, error) {
if len(in) == 0 && additionalLabelName == "" { if len(in) == 0 && additionalLabelName == "" {
return 0, nil return 0, nil
} }
var written int var (
separator := '{' written int
for _, lp := range in { separator byte = '{'
n, err := fmt.Fprintf(
out, `%c%s="%s"`,
separator, lp.GetName(), escapeString(lp.GetValue(), true),
) )
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
n, err := w.WriteString(lp.GetName())
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
n, err = writeEscapedString(w, lp.GetValue(), true)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
separator = ',' separator = ','
} }
if additionalLabelName != "" { if additionalLabelName != "" {
n, err := fmt.Fprintf( err := w.WriteByte(separator)
out, `%c%s="%s"`, written++
separator, additionalLabelName, if err != nil {
escapeString(additionalLabelValue, true), return written, err
) }
n, err := w.WriteString(additionalLabelName)
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
} n, err = w.WriteString(`="`)
n, err := out.Write([]byte{'}'})
written += n written += n
if err != nil { if err != nil {
return written, err return written, err
} }
n, err = writeFloat(w, additionalLabelValue)
written += n
if err != nil {
return written, err
}
err = w.WriteByte('"')
written++
if err != nil {
return written, err
}
}
err := w.WriteByte('}')
written++
if err != nil {
return written, err
}
return written, nil return written, nil
} }
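
For reference, the escaping applied to label values mirrors the replacers defined just below; a standalone sketch using the same strings.NewReplacer rules:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Backslash, newline, and double quote are escaped inside label values.
	esc := strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
	fmt.Println(esc.Replace("path=\"C:\\tmp\"\n")) // path=\"C:\\tmp\"\n
}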
// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
var ( var (
escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`)
escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
) )
// escapeString replaces '\' by '\\', new line character by '\n', and - if func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
// includeDoubleQuote is true - '"' by '\"'.
func escapeString(v string, includeDoubleQuote bool) string {
if includeDoubleQuote { if includeDoubleQuote {
return escapeWithDoubleQuote.Replace(v) return quotedEscaper.WriteString(w, v)
} else {
return escaper.WriteString(w, v)
}
} }
return escape.Replace(v) // writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
// a few common cases for increased efficiency. For non-hardcoded cases, it uses
// strconv.AppendFloat to avoid allocations, similar to writeInt.
func writeFloat(w enhancedWriter, f float64) (int, error) {
switch {
case f == 1:
return 1, w.WriteByte('1')
case f == 0:
return 1, w.WriteByte('0')
case f == -1:
return w.WriteString("-1")
case math.IsNaN(f):
return w.WriteString("NaN")
case math.IsInf(f, +1):
return w.WriteString("+Inf")
case math.IsInf(f, -1):
return w.WriteString("-Inf")
default:
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
}
}
// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
// allocations.
func writeInt(w enhancedWriter, i int64) (int, error) {
bp := numBufPool.Get().(*[]byte)
*bp = strconv.AppendInt((*bp)[:0], i, 10)
written, err := w.Write(*bp)
numBufPool.Put(bp)
return written, err
} }
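
A hedged usage sketch of the encoder above; the metric family is made up for illustration, and the proto helpers come from github.com/golang/protobuf/proto:

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("demo_total"), // hypothetical metric
		Help: proto.String("A demo counter."),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	// Emits "# HELP", "# TYPE", and one sample line via the pooled writer path.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}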

View file

@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
if p.readTokenAsLabelValue(); p.err != nil { if p.readTokenAsLabelValue(); p.err != nil {
return nil return nil
} }
if !model.LabelValue(p.currentToken.String()).IsValid() {
p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
return nil
}
p.currentLabelPair.Value = proto.String(p.currentToken.String()) p.currentLabelPair.Value = proto.String(p.currentToken.String())
// Special treatment of summaries: // Special treatment of summaries:
// - Quantile labels are special, will result in dto.Quantile later. // - Quantile labels are special, will result in dto.Quantile later.
@ -355,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
} }
return p.readingValue return p.readingValue
default: default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil return nil
} }
} }
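
The validation added above rejects label values that are not valid UTF-8. A small sketch of the underlying model-level check:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelValue("ok").IsValid())                 // true
	fmt.Println(model.LabelValue(string([]byte{0xff})).IsValid()) // false: invalid UTF-8
}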
@ -552,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
// byte considered is the byte already read (now in p.currentByte). The first // byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into // newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All // recognized: '\\' translates into '\', and '\n' into a line-feed character.
// other escape sequences are invalid and cause an error. // All other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.Reset() p.currentToken.Reset()
escaped := false escaped := false

View file

@ -1,12 +1,12 @@
/* /*
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
HTTP Content-Type Autonegotiation. HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are
met: met:

View file

@ -21,7 +21,6 @@ import (
) )
var ( var (
separator = []byte{0}
// MetricNameRE is a regular expression matching valid metric // MetricNameRE is a regular expression matching valid metric
// names. Note that the IsValidMetricName function performs the same // names. Note that the IsValidMetricName function performs the same
// check but faster than a match with this regular expression. // check but faster than a match with this regular expression.

View file

@ -59,8 +59,8 @@ func (m *Matcher) Validate() error {
return nil return nil
} }
// Silence defines the representation of a silence definiton // Silence defines the representation of a silence definition in the Prometheus
// in the Prometheus eco-system. // eco-system.
type Silence struct { type Silence struct {
ID uint64 `json:"id,omitempty"` ID uint64 `json:"id,omitempty"`

View file

@ -43,7 +43,7 @@ const (
// (1970-01-01 00:00 UTC) excluding leap seconds. // (1970-01-01 00:00 UTC) excluding leap seconds.
type Time int64 type Time int64
// Interval describes and interval between two timestamps. // Interval describes an interval between two timestamps.
type Interval struct { type Interval struct {
Start, End Time Start, End Time
} }
@ -150,7 +150,13 @@ func (t *Time) UnmarshalJSON(b []byte) error {
return err return err
} }
// If the value was something like -0.1, the negative is lost in the
// parsing because of the leading zero; this ensures that we capture it.
if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
*t = Time(v+va) * -1
} else {
*t = Time(v + va) *t = Time(v + va)
}
default: default:
return fmt.Errorf("invalid time %q", string(b)) return fmt.Errorf("invalid time %q", string(b))
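
The sign check added above exists because parsing the integer part of "-0.1" yields zero and silently drops the minus. A standalone sketch of the pitfall:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	v, _ := strconv.ParseInt("-0", 10, 64) // v == 0: the sign is gone
	va := int64(100)                       // fractional part, already scaled (hypothetical)
	fmt.Println(v + va)                    // 100 instead of -100, hence the explicit check
}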
@ -163,9 +169,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing. // This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration type Duration time.Duration
// Set implements pflag/flag.Value
func (d *Duration) Set(s string) error {
var err error
*d, err = ParseDuration(s)
return err
}
// Type implements pflag.Value
func (d *Duration) Type() string {
return "duration"
}
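
A short usage sketch of the new pflag-style methods; note that durationRE below only accepts single-unit strings such as "90m" or "2d":

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	var d model.Duration
	if err := d.Set("2d"); err != nil {
		panic(err)
	}
	fmt.Println(d.Type(), d) // duration 2d
}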
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
// StringToDuration parses a string into a time.Duration, assuming that a year // ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h. // always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) { func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr) matches := durationRE.FindStringSubmatch(durationStr)
@ -202,6 +220,9 @@ func (d Duration) String() string {
ms = int64(time.Duration(d) / time.Millisecond) ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms" unit = "ms"
) )
if ms == 0 {
return "0s"
}
factors := map[string]int64{ factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365, "y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7, "w": 1000 * 60 * 60 * 24 * 7,

View file

@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
} }
// Equal returns true if this SamplePair and o have equal Values and equal // Equal returns true if this SamplePair and o have equal Values and equal
// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. // Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool { func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
} }
@ -117,7 +117,7 @@ type Sample struct {
} }
// Equal compares first the metrics, then the timestamp, then the value. The // Equal compares first the metrics, then the timestamp, then the value. The
// sematics of value equality is defined by SampleValue.Equal. // semantics of value equality is defined by SampleValue.Equal.
func (s *Sample) Equal(o *Sample) bool { func (s *Sample) Equal(o *Sample) bool {
if s == o { if s == o {
return true return true

View file

@ -31,19 +31,9 @@ type BuddyInfo struct {
Sizes []float64 Sizes []float64
} }
// NewBuddyInfo reads the buddyinfo statistics.
func NewBuddyInfo() ([]BuddyInfo, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewBuddyInfo()
}
// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. // NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
file, err := os.Open(fs.Path("buddyinfo")) file, err := os.Open(fs.proc.Path("buddyinfo"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -62,7 +52,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for scanner.Scan() { for scanner.Scan() {
var err error var err error
line := scanner.Text() line := scanner.Text()
parts := strings.Fields(string(line)) parts := strings.Fields(line)
if len(parts) < 4 { if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")

View file

@ -1,46 +1,43 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs package procfs
import ( import (
"fmt" "github.com/prometheus/procfs/internal/fs"
"os"
"path"
"github.com/prometheus/procfs/xfs"
) )
// FS represents the pseudo-filesystem proc, which provides an interface to // FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures. // kernel data structures.
type FS string type FS struct {
proc fs.FS
}
// DefaultMountPoint is the common mount point of the proc filesystem. // DefaultMountPoint is the common mount point of the proc filesystem.
const DefaultMountPoint = "/proc" const DefaultMountPoint = fs.DefaultProcMountPoint
// NewFS returns a new FS mounted under the given mountPoint. It will error // NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// if the mount point can't be read. // It will error if the mount point directory can't be read or is a file.
func NewDefaultFS() (FS, error) {
return NewFS(DefaultMountPoint)
}
// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
// if the mount point directory can't be read or is a file.
func NewFS(mountPoint string) (FS, error) { func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint) fs, err := fs.NewFS(mountPoint)
if err != nil { if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err) return FS{}, err
} }
if !info.IsDir() { return FS{fs}, nil
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path returns the path of the given subsystem relative to the procfs root.
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
// XFSStats retrieves XFS filesystem runtime statistics.
func (fs FS) XFSStats() (*xfs.Stats, error) {
f, err := os.Open(fs.Path("fs/xfs/stat"))
if err != nil {
return nil, err
}
defer f.Close()
return xfs.ParseStats(f)
} }
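
A small sketch of the reworked constructors; NewDefaultFS is shorthand for NewFS(DefaultMountPoint), and both reject unreadable or non-directory mount points:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	if _, err := procfs.NewFS("/nonexistent"); err != nil {
		fmt.Println("expected error:", err)
	}
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	_ = fs // use fs.IPVSStats(), fs.MDStat(), etc. from here
}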

55
vendor/github.com/prometheus/procfs/internal/fs/fs.go generated vendored Normal file
View file

@ -0,0 +1,55 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fs
import (
"fmt"
"os"
"path/filepath"
)
const (
// DefaultProcMountPoint is the common mount point of the proc filesystem.
DefaultProcMountPoint = "/proc"
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
// DefaultConfigfsMountPoint is the common mount point of the configfs filesystem.
DefaultConfigfsMountPoint = "/sys/kernel/config"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
// interface to kernel data structures.
type FS string
// NewFS returns a new FS mounted under the given mountPoint. It will error
// if the mount point can't be read.
func NewFS(mountPoint string) (FS, error) {
info, err := os.Stat(mountPoint)
if err != nil {
return "", fmt.Errorf("could not read %s: %s", mountPoint, err)
}
if !info.IsDir() {
return "", fmt.Errorf("mount point %s is not a directory", mountPoint)
}
return FS(mountPoint), nil
}
// Path appends the given path elements to the filesystem path, adding separators
// as necessary.
func (fs FS) Path(p ...string) string {
return filepath.Join(append([]string{string(fs)}, p...)...)
}

View file

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs package procfs
import ( import (
@ -31,14 +44,16 @@ type IPVSStats struct {
type IPVSBackendStatus struct { type IPVSBackendStatus struct {
// The local (virtual) IP address. // The local (virtual) IP address.
LocalAddress net.IP LocalAddress net.IP
// The local (virtual) port.
LocalPort uint16
// The transport protocol (TCP, UDP).
Proto string
// The remote (real) IP address. // The remote (real) IP address.
RemoteAddress net.IP RemoteAddress net.IP
// The local (virtual) port.
LocalPort uint16
// The remote (real) port. // The remote (real) port.
RemotePort uint16 RemotePort uint16
// The local firewall mark
LocalMark string
// The transport protocol (TCP, UDP).
Proto string
// The current number of active connections for this virtual/real address pair. // The current number of active connections for this virtual/real address pair.
ActiveConn uint64 ActiveConn uint64
// The current number of inactive connections for this virtual/real address pair. // The current number of inactive connections for this virtual/real address pair.
@ -47,19 +62,9 @@ type IPVSBackendStatus struct {
Weight uint64 Weight uint64
} }
// NewIPVSStats reads the IPVS statistics. // IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func NewIPVSStats() (IPVSStats, error) { func (fs FS) IPVSStats() (IPVSStats, error) {
fs, err := NewFS(DefaultMountPoint) file, err := os.Open(fs.proc.Path("net/ip_vs_stats"))
if err != nil {
return IPVSStats{}, err
}
return fs.NewIPVSStats()
}
// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.
func (fs FS) NewIPVSStats() (IPVSStats, error) {
file, err := os.Open(fs.Path("net/ip_vs_stats"))
if err != nil { if err != nil {
return IPVSStats{}, err return IPVSStats{}, err
} }
@ -116,19 +121,9 @@ func parseIPVSStats(file io.Reader) (IPVSStats, error) {
return stats, nil return stats, nil
} }
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. // IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
fs, err := NewFS(DefaultMountPoint) file, err := os.Open(fs.proc.Path("net/ip_vs"))
if err != nil {
return []IPVSBackendStatus{}, err
}
return fs.NewIPVSBackendStatus()
}
// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {
file, err := os.Open(fs.Path("net/ip_vs"))
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -142,13 +137,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status []IPVSBackendStatus status []IPVSBackendStatus
scanner = bufio.NewScanner(file) scanner = bufio.NewScanner(file)
proto string proto string
localMark string
localAddress net.IP localAddress net.IP
localPort uint16 localPort uint16
err error err error
) )
for scanner.Scan() { for scanner.Scan() {
fields := strings.Fields(string(scanner.Text())) fields := strings.Fields(scanner.Text())
if len(fields) == 0 { if len(fields) == 0 {
continue continue
} }
@ -160,10 +156,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
continue continue
} }
proto = fields[0] proto = fields[0]
localMark = ""
localAddress, localPort, err = parseIPPort(fields[1]) localAddress, localPort, err = parseIPPort(fields[1])
if err != nil { if err != nil {
return nil, err return nil, err
} }
case fields[0] == "FWM":
if len(fields) < 2 {
continue
}
proto = fields[0]
localMark = fields[1]
localAddress = nil
localPort = 0
case fields[0] == "->": case fields[0] == "->":
if len(fields) < 6 { if len(fields) < 6 {
continue continue
@ -187,6 +192,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status = append(status, IPVSBackendStatus{ status = append(status, IPVSBackendStatus{
LocalAddress: localAddress, LocalAddress: localAddress,
LocalPort: localPort, LocalPort: localPort,
LocalMark: localMark,
RemoteAddress: remoteAddress, RemoteAddress: remoteAddress,
RemotePort: remotePort, RemotePort: remotePort,
Proto: proto, Proto: proto,
@ -200,22 +206,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
} }
func parseIPPort(s string) (net.IP, uint16, error) { func parseIPPort(s string) (net.IP, uint16, error) {
tmp := strings.SplitN(s, ":", 2) var (
ip net.IP
err error
)
if len(tmp) != 2 { switch len(s) {
return nil, 0, fmt.Errorf("invalid IP:Port: %s", s) case 13:
} ip, err = hex.DecodeString(s[0:8])
if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
}
ip, err := hex.DecodeString(tmp[0])
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
}
default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
}
port, err := strconv.ParseUint(tmp[1], 16, 16) portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil { if err != nil {
return nil, 0, err return nil, 0, err
} }
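
The rewritten parseIPPort distinguishes the 13-character hex IPv4 form from the 46-character bracketed IPv6 form. A standalone sketch of the IPv4 branch; the address value is made up:

package main

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

func main() {
	s := "C0A80001:0050" // hypothetical /proc/net/ip_vs address field
	raw, _ := hex.DecodeString(s[0:8])                 // four bytes of IPv4 address
	port, _ := strconv.ParseUint(s[len(s)-4:], 16, 16) // last four hex digits
	fmt.Println(net.IP(raw), port)                     // 192.168.0.1 80
}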

View file

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs package procfs
import ( import (
@ -9,8 +22,8 @@ import (
) )
var ( var (
statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
) )
// MDStat holds info parsed from /proc/mdstat. // MDStat holds info parsed from /proc/mdstat.
@ -21,117 +34,160 @@ type MDStat struct {
ActivityState string ActivityState string
// Number of active disks. // Number of active disks.
DisksActive int64 DisksActive int64
// Total number of disks the device consists of. // Total number of disks the device requires.
DisksTotal int64 DisksTotal int64
// Number of failed disks.
DisksFailed int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds. // Number of blocks the device holds.
BlocksTotal int64 BlocksTotal int64
// Number of blocks on the device that are in sync. // Number of blocks on the device that are in sync.
BlocksSynced int64 BlocksSynced int64
} }
// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. // MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of
func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { // structs containing the relevant info. More information available here:
mdStatusFilePath := fs.Path("mdstat") // https://raid.wiki.kernel.org/index.php/Mdstat
content, err := ioutil.ReadFile(mdStatusFilePath) func (fs FS) MDStat() ([]MDStat, error) {
data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
if err != nil { if err != nil {
return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err)
}
return mdstat, nil
} }
mdStates := []MDStat{} // parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
lines := strings.Split(string(content), "\n") // structs containing the relevant info.
for i, l := range lines { func parseMDStat(mdStatData []byte) ([]MDStat, error) {
if l == "" { mdStats := []MDStat{}
continue lines := strings.Split(string(mdStatData), "\n")
}
if l[0] == ' ' { for i, line := range lines {
continue if strings.TrimSpace(line) == "" || line[0] == ' ' ||
} strings.HasPrefix(line, "Personalities") ||
if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { strings.HasPrefix(line, "unused") {
continue continue
} }
mainLine := strings.Split(l, " ") deviceFields := strings.Fields(line)
if len(mainLine) < 3 { if len(deviceFields) < 3 {
return mdStates, fmt.Errorf("error parsing mdline: %s", l) return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
} }
mdName := mainLine[0] mdName := deviceFields[0] // mdx
activityState := mainLine[2] state := deviceFields[2] // active or inactive
if len(lines) <= i+3 { if len(lines) <= i+3 {
return mdStates, fmt.Errorf( return nil, fmt.Errorf(
"error parsing %s: too few lines for md device %s", "error parsing %s: too few lines for md device",
mdStatusFilePath,
mdName, mdName,
) )
} }
active, total, size, err := evalStatusline(lines[i+1]) // Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
active, total, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil { if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) return nil, fmt.Errorf("error parsing md device lines: %s", err)
} }
// j is the line number of the syncing-line. syncLineIdx := i + 2
j := i + 2
if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
j = i + 3 syncLineIdx++
} }
// If device is syncing at the moment, get the number of currently // If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device. // synced bytes, otherwise that number equals the size of the device.
syncedBlocks := size syncedBlocks := size
if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { recovering := strings.Contains(lines[syncLineIdx], "recovery")
syncedBlocks, err = evalBuildline(lines[j]) resyncing := strings.Contains(lines[syncLineIdx], "resync")
// Append recovery and resyncing state info.
if recovering || resyncing {
if recovering {
state = "recovering"
} else {
state = "resyncing"
}
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
syncedBlocks = 0
} else {
syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil { if err != nil {
return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err)
}
} }
} }
mdStates = append(mdStates, MDStat{ mdStats = append(mdStats, MDStat{
Name: mdName, Name: mdName,
ActivityState: activityState, ActivityState: state,
DisksActive: active, DisksActive: active,
DisksFailed: fail,
DisksSpare: spare,
DisksTotal: total, DisksTotal: total,
BlocksTotal: size, BlocksTotal: size,
BlocksSynced: syncedBlocks, BlocksSynced: syncedBlocks,
}) })
} }
return mdStates, nil return mdStats, nil
} }
func evalStatusline(statusline string) (active, total, size int64, err error) { func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
matches := statuslineRE.FindStringSubmatch(statusline)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline)
}
size, err = strconv.ParseInt(matches[1], 10, 64) sizeStr := strings.Fields(statusLine)[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device line, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
return total, total, size, nil
}
if strings.Contains(deviceLine, "inactive") {
return 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 4 {
return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
} }
total, err = strconv.ParseInt(matches[2], 10, 64) total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
} }
active, err = strconv.ParseInt(matches[3], 10, 64) active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil { if err != nil {
return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err)
} }
return active, total, size, nil return active, total, size, nil
} }
func evalBuildline(buildline string) (syncedBlocks int64, err error) { func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
matches := buildlineRE.FindStringSubmatch(buildline) matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 { if len(matches) != 2 {
return 0, fmt.Errorf("unexpected buildline: %s", buildline) return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
} }
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil { if err != nil {
return 0, fmt.Errorf("%s in buildline: %s", err, buildline) return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine)
} }
return syncedBlocks, nil return syncedBlocks, nil
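
A hedged usage sketch of the renamed MDStat accessor together with the new failed and spare disk counters:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	stats, err := fs.MDStat()
	if err != nil {
		panic(err)
	}
	for _, md := range stats {
		fmt.Printf("%s %s active=%d failed=%d spare=%d synced=%d/%d\n",
			md.Name, md.ActivityState, md.DisksActive,
			md.DisksFailed, md.DisksSpare, md.BlocksSynced, md.BlocksTotal)
	}
}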

178
vendor/github.com/prometheus/procfs/mountinfo.go generated vendored Normal file
View file

@ -0,0 +1,178 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
var validOptionalFields = map[string]bool{
"shared": true,
"master": true,
"propagate_from": true,
"unbindable": true,
}
// A MountInfo is a type that describes the details and options
// for each mount, parsed from /proc/self/mountinfo.
// The fields of each entry in /proc/self/mountinfo
// are described in the following man page:
// http://man7.org/linux/man-pages/man5/proc.5.html
type MountInfo struct {
// Unique Id for the mount
MountId int
// The Id of the parent mount
ParentId int
// The value of `st_dev` for the files on this FS
MajorMinorVer string
// The pathname of the directory in the FS that forms
// the root for this mount
Root string
// The pathname of the mount point relative to the root
MountPoint string
// Mount options
Options map[string]string
// Zero or more optional fields
OptionalFields map[string]string
// The Filesystem type
FSType string
// FS specific information or "none"
Source string
// Superblock options
SuperOptions map[string]string
}
// Returns part of the mountinfo line, if it exists, else the given default value.
func getStringSliceElement(parts []string, idx int, defaultValue string) string {
if idx >= len(parts) {
return defaultValue
}
return parts[idx]
}
// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
func parseMountInfo(r io.Reader) ([]*MountInfo, error) {
mounts := []*MountInfo{}
scanner := bufio.NewScanner(r)
for scanner.Scan() {
mountString := scanner.Text()
parsedMounts, err := parseMountInfoString(mountString)
if err != nil {
return nil, err
}
mounts = append(mounts, parsedMounts)
}
err := scanner.Err()
return mounts, err
}
// Parses a mountinfo file line, and converts it to a MountInfo struct.
// An important check here is to see if the hyphen separator exists;
// if it does not, the line is malformed.
func parseMountInfoString(mountString string) (*MountInfo, error) {
var err error
// OptionalFields can be zero, hence these checks to ensure we do not populate the wrong values in the wrong spots.
separatorIndex := strings.Index(mountString, "-")
if separatorIndex == -1 {
return nil, fmt.Errorf("no separator found in mountinfo string: %s", mountString)
}
beforeFields := strings.Fields(mountString[:separatorIndex])
afterFields := strings.Fields(mountString[separatorIndex+1:])
if (len(beforeFields) + len(afterFields)) < 7 {
return nil, fmt.Errorf("too few fields")
}
mount := &MountInfo{
MajorMinorVer: getStringSliceElement(beforeFields, 2, ""),
Root: getStringSliceElement(beforeFields, 3, ""),
MountPoint: getStringSliceElement(beforeFields, 4, ""),
Options: mountOptionsParser(getStringSliceElement(beforeFields, 5, "")),
OptionalFields: nil,
FSType: getStringSliceElement(afterFields, 0, ""),
Source: getStringSliceElement(afterFields, 1, ""),
SuperOptions: mountOptionsParser(getStringSliceElement(afterFields, 2, "")),
}
mount.MountId, err = strconv.Atoi(getStringSliceElement(beforeFields, 0, ""))
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
}
mount.ParentId, err = strconv.Atoi(getStringSliceElement(beforeFields, 1, ""))
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if len(beforeFields) > 6 {
mount.OptionalFields = make(map[string]string)
optionalFields := beforeFields[6:]
for _, field := range optionalFields {
optionSplit := strings.Split(field, ":")
target, value := optionSplit[0], ""
if len(optionSplit) == 2 {
value = optionSplit[1]
}
// Checks if the 'keys' in the optional fields in the mountinfo line are acceptable.
// Allowed 'keys' are shared, master, propagate_from, unbindable.
if _, ok := validOptionalFields[target]; ok {
mount.OptionalFields[target] = value
}
}
}
return mount, nil
}
// Parses the mount options, superblock options.
func mountOptionsParser(mountOptions string) map[string]string {
opts := make(map[string]string)
options := strings.Split(mountOptions, ",")
for _, opt := range options {
splitOption := strings.Split(opt, "=")
if len(splitOption) < 2 {
key := splitOption[0]
opts[key] = ""
} else {
key, value := splitOption[0], splitOption[1]
opts[key] = value
}
}
return opts
}
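
mountOptionsParser above turns a comma-separated option string into a map, with bare flags mapping to the empty string. A standalone sketch of the same splitting (SplitN with limit 2 behaves identically for this input):

package main

import (
	"fmt"
	"strings"
)

func main() {
	opts := map[string]string{}
	for _, opt := range strings.Split("rw,relatime,errors=remount-ro", ",") {
		kv := strings.SplitN(opt, "=", 2)
		if len(kv) == 2 {
			opts[kv[0]] = kv[1]
		} else {
			opts[kv[0]] = "" // flag option without a value
		}
	}
	fmt.Println(opts) // map[errors:remount-ro relatime: rw:]
}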
// Retrieves mountinfo information from `/proc/self/mountinfo`.
func GetMounts() ([]*MountInfo, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
// Retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
func GetProcMounts(pid int) ([]*MountInfo, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
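
A short usage sketch of the new mountinfo helpers:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	mounts, err := procfs.GetMounts()
	if err != nil {
		panic(err)
	}
	for _, m := range mounts {
		fmt.Println(m.MountPoint, m.FSType, m.SuperOptions)
	}
}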

View file

@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs package procfs
// While implementing parsing of /proc/[pid]/mountstats, this blog was used // While implementing parsing of /proc/[pid]/mountstats, this blog was used
@ -26,8 +39,11 @@ const (
statVersion10 = "1.0" statVersion10 = "1.0"
statVersion11 = "1.1" statVersion11 = "1.1"
fieldTransport10Len = 10 fieldTransport10TCPLen = 10
fieldTransport11Len = 13 fieldTransport10UDPLen = 7
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
) )
// A Mount is a device mount parsed from /proc/[pid]/mountstats. // A Mount is a device mount parsed from /proc/[pid]/mountstats.
@ -53,6 +69,8 @@ type MountStats interface {
type MountStatsNFS struct { type MountStatsNFS struct {
// The version of statistics provided. // The version of statistics provided.
StatVersion string StatVersion string
// The mount options of the NFS mount.
Opts map[string]string
// The age of the NFS mount. // The age of the NFS mount.
Age time.Duration Age time.Duration
// Statistics related to byte counters for various operations. // Statistics related to byte counters for various operations.
@ -163,16 +181,18 @@ type NFSOperationStats struct {
// Number of bytes received for this operation, including RPC headers and payload. // Number of bytes received for this operation, including RPC headers and payload.
BytesReceived uint64 BytesReceived uint64
// Duration all requests spent queued for transmission before they were sent. // Duration all requests spent queued for transmission before they were sent.
CumulativeQueueTime time.Duration CumulativeQueueMilliseconds uint64
// Duration it took to get a reply back after the request was transmitted. // Duration it took to get a reply back after the request was transmitted.
CumulativeTotalResponseTime time.Duration CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled. // Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestTime time.Duration CumulativeTotalRequestMilliseconds uint64
} }
// A NFSTransportStats contains statistics for the NFS mount RPC requests and // A NFSTransportStats contains statistics for the NFS mount RPC requests and
// responses. // responses.
type NFSTransportStats struct { type NFSTransportStats struct {
// The transport protocol used for the NFS mount.
Protocol string
// The local port used for the NFS mount. // The local port used for the NFS mount.
Port uint64 Port uint64
// Number of times the client has had to establish a connection from scratch // Number of times the client has had to establish a connection from scratch
@ -184,7 +204,7 @@ type NFSTransportStats struct {
// spent waiting for connections to the server to be established. // spent waiting for connections to the server to be established.
ConnectIdleTime uint64 ConnectIdleTime uint64
// Duration since the NFS mount last saw any RPC traffic. // Duration since the NFS mount last saw any RPC traffic.
IdleTime time.Duration IdleTimeSeconds uint64
// Number of RPC requests for this mount sent to the NFS server. // Number of RPC requests for this mount sent to the NFS server.
Sends uint64 Sends uint64
// Number of RPC responses for this mount received from the NFS server. // Number of RPC responses for this mount received from the NFS server.
@ -299,6 +319,7 @@ func parseMount(ss []string) (*Mount, error) {
func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
// Field indicators for parsing specific types of data // Field indicators for parsing specific types of data
const ( const (
fieldOpts = "opts:"
fieldAge = "age:" fieldAge = "age:"
fieldBytes = "bytes:" fieldBytes = "bytes:"
fieldEvents = "events:" fieldEvents = "events:"
@ -320,6 +341,18 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
} }
switch ss[0] { switch ss[0] {
case fieldOpts:
if stats.Opts == nil {
stats.Opts = map[string]string{}
}
for _, opt := range strings.Split(ss[1], ",") {
split := strings.Split(opt, "=")
if len(split) == 2 {
stats.Opts[split[0]] = split[1]
} else {
stats.Opts[opt] = ""
}
}
case fieldAge: case fieldAge:
// Age integer is in seconds // Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s") d, err := time.ParseDuration(ss[1] + "s")
@ -347,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
} }
tstats, err := parseNFSTransportStats(ss[2:], statVersion) tstats, err := parseNFSTransportStats(ss[1:], statVersion)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -497,9 +530,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
MajorTimeouts: ns[2], MajorTimeouts: ns[2],
BytesSent: ns[3], BytesSent: ns[3],
BytesReceived: ns[4], BytesReceived: ns[4],
CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, CumulativeQueueMilliseconds: ns[5],
CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, CumulativeTotalRequestMilliseconds: ns[7],
}) })
} }
@ -509,13 +542,33 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
// parseNFSTransportStats parses a NFSTransportStats line using an input set of // parseNFSTransportStats parses a NFSTransportStats line using an input set of
// integer fields matched to a specific stats version. // integer fields matched to a specific stats version.
func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
// Extract the protocol field. It is the only string value in the line.
protocol := ss[0]
ss = ss[1:]
switch statVersion { switch statVersion {
case statVersion10: case statVersion10:
if len(ss) != fieldTransport10Len { var expectedLength int
if protocol == "tcp" {
expectedLength = fieldTransport10TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen
} else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
}
if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
} }
case statVersion11: case statVersion11:
if len(ss) != fieldTransport11Len { var expectedLength int
if protocol == "tcp" {
expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen
} else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
}
if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
} }
default: default:
@ -523,23 +576,39 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} }
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
// in a v1.0 response // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
ns := make([]uint64, 0, fieldTransport11Len) // the TCP length here.
for _, s := range ss { //
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
ns := make([]uint64, fieldTransport11TCPLen)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64) n, err := strconv.ParseUint(s, 10, 64)
if err != nil { if err != nil {
return nil, err return nil, err
} }
ns = append(ns, n) ns[i] = n
}
// The fields differ depending on the transport protocol (TCP or UDP)
// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
//
// For the udp RPC transport there is no connection count, connect idle time,
// or idle time (fields #3, #4, and #5); all other fields are the same. So
// we set them to 0 here.
if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
} }
return &NFSTransportStats{ return &NFSTransportStats{
Protocol: protocol,
Port: ns[0], Port: ns[0],
Bind: ns[1], Bind: ns[1],
Connect: ns[2], Connect: ns[2],
ConnectIdleTime: ns[3], ConnectIdleTime: ns[3],
IdleTime: time.Duration(ns[4]) * time.Second, IdleTimeSeconds: ns[4],
Sends: ns[5], Sends: ns[5],
Receives: ns[6], Receives: ns[6],
BadTransactionIDs: ns[7], BadTransactionIDs: ns[7],
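
The append expression above splices three zero-valued fields into the UDP stats so they line up with the TCP layout. A standalone sketch of that slice-insertion idiom:

package main

import "fmt"

func main() {
	ns := []uint64{10, 11, 50, 60}
	// Insert three zeros at index 2; the inner append copies into a fresh
	// backing array, so the outer append cannot clobber its own source.
	ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
	fmt.Println(ns) // [10 11 0 0 0 50 60]
}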

206
vendor/github.com/prometheus/procfs/net_dev.go generated vendored Normal file
View file

@ -0,0 +1,206 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"os"
"sort"
"strconv"
"strings"
)
// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
type NetDevLine struct {
Name string `json:"name"` // The name of the interface.
RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
}
// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
// are interface names.
type NetDev map[string]NetDevLine
// NetDev returns kernel/system statistics read from /proc/net/dev.
func (fs FS) NetDev() (NetDev, error) {
return newNetDev(fs.proc.Path("net/dev"))
}
// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
func (p Proc) NetDev() (NetDev, error) {
return newNetDev(p.path("net/dev"))
}
// newNetDev creates a new NetDev from the contents of the given file.
func newNetDev(file string) (NetDev, error) {
f, err := os.Open(file)
if err != nil {
return NetDev{}, err
}
defer f.Close()
netDev := NetDev{}
s := bufio.NewScanner(f)
for n := 0; s.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line, err := netDev.parseLine(s.Text())
if err != nil {
return netDev, err
}
netDev[line.Name] = *line
}
return netDev, s.Err()
}
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
parts := strings.SplitN(rawLine, ":", 2)
if len(parts) != 2 {
return nil, errors.New("invalid net/dev line, missing colon")
}
fields := strings.Fields(strings.TrimSpace(parts[1]))
var err error
line := &NetDevLine{}
// Interface Name
line.Name = strings.TrimSpace(parts[0])
if line.Name == "" {
return nil, errors.New("invalid net/dev line, empty interface name")
}
// RX
line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return nil, err
}
line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return nil, err
}
line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
if err != nil {
return nil, err
}
line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
if err != nil {
return nil, err
}
line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
if err != nil {
return nil, err
}
line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
if err != nil {
return nil, err
}
line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
if err != nil {
return nil, err
}
// TX
line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
if err != nil {
return nil, err
}
line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
if err != nil {
return nil, err
}
line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
if err != nil {
return nil, err
}
line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
if err != nil {
return nil, err
}
line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
if err != nil {
return nil, err
}
line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
if err != nil {
return nil, err
}
line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
if err != nil {
return nil, err
}
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
if err != nil {
return nil, err
}
return line, nil
}
// Total aggregates the values across interfaces and returns a new NetDevLine.
// The Name field will be a sorted, comma-separated list of interface names.
func (netDev NetDev) Total() NetDevLine {
total := NetDevLine{}
names := make([]string, 0, len(netDev))
for _, ifc := range netDev {
names = append(names, ifc.Name)
total.RxBytes += ifc.RxBytes
total.RxPackets += ifc.RxPackets
total.RxPackets += ifc.RxPackets
total.RxErrors += ifc.RxErrors
total.RxDropped += ifc.RxDropped
total.RxFIFO += ifc.RxFIFO
total.RxFrame += ifc.RxFrame
total.RxCompressed += ifc.RxCompressed
total.RxMulticast += ifc.RxMulticast
total.TxBytes += ifc.TxBytes
total.TxPackets += ifc.TxPackets
total.TxErrors += ifc.TxErrors
total.TxDropped += ifc.TxDropped
total.TxFIFO += ifc.TxFIFO
total.TxCollisions += ifc.TxCollisions
total.TxCarrier += ifc.TxCarrier
total.TxCompressed += ifc.TxCompressed
}
sort.Strings(names)
total.Name = strings.Join(names, ", ")
return total
}
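As a rough usage sketch (assuming this vendored package is imported as github.com/prometheus/procfs, the path under which it is vendored here), reading the per-interface counters and the aggregate looks like:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	netDev, err := fs.NetDev()
	if err != nil {
		log.Fatal(err)
	}
	// Total() sums the counters and joins the sorted interface names.
	total := netDev.Total()
	fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", total.Name, total.RxBytes, total.TxBytes)
}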

vendor/github.com/prometheus/procfs/net_unix.go generated vendored Normal file
@ -0,0 +1,275 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// For the proc file format details,
// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
const (
netUnixKernelPtrIdx = iota
netUnixRefCountIdx
_
netUnixFlagsIdx
netUnixTypeIdx
netUnixStateIdx
netUnixInodeIdx
// Inode and Path are optional.
netUnixStaticFieldsCnt = 6
)
const (
netUnixTypeStream = 1
netUnixTypeDgram = 2
netUnixTypeSeqpacket = 5
netUnixFlagListen = 1 << 16
netUnixStateUnconnected = 1
netUnixStateConnecting = 2
netUnixStateConnected = 3
netUnixStateDisconnected = 4
)
var errInvalidKernelPtrFmt = errors.New("invalid Num (the kernel table slot number) format")
// NetUnixType is the type of the type field.
type NetUnixType uint64
// NetUnixFlags is the type of the flags field.
type NetUnixFlags uint64
// NetUnixState is the type of the state field.
type NetUnixState uint64
// NetUnixLine represents a line of /proc/net/unix.
type NetUnixLine struct {
KernelPtr string
RefCount uint64
Protocol uint64
Flags NetUnixFlags
Type NetUnixType
State NetUnixState
Inode uint64
Path string
}
// NetUnix holds the data read from /proc/net/unix.
type NetUnix struct {
Rows []*NetUnixLine
}
// NewNetUnix returns data read from /proc/net/unix.
func NewNetUnix() (*NetUnix, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
return nil, err
}
return fs.NewNetUnix()
}
// NewNetUnix returns data read from /proc/net/unix.
func (fs FS) NewNetUnix() (*NetUnix, error) {
return NewNetUnixByPath(fs.proc.Path("net/unix"))
}
// NewNetUnixByPath returns data read from /proc/net/unix by file path.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByPath(path string) (*NetUnix, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return NewNetUnixByReader(f)
}
// NewNetUnixByReader returns data read from /proc/net/unix by a reader.
// It might return an error with partially parsed data if an error occurs after some data has been parsed.
func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) {
nu := &NetUnix{
Rows: make([]*NetUnixLine, 0, 32),
}
scanner := bufio.NewScanner(reader)
// Omit the header line.
scanner.Scan()
header := scanner.Text()
// The man page of proc(5) does not mention an Inode field,
// but in practice it exists.
// This code works for both cases.
hasInode := strings.Contains(header, "Inode")
minFieldsCnt := netUnixStaticFieldsCnt
if hasInode {
minFieldsCnt++
}
for scanner.Scan() {
line := scanner.Text()
item, err := nu.parseLine(line, hasInode, minFieldsCnt)
if err != nil {
return nu, err
}
nu.Rows = append(nu.Rows, item)
}
return nu, scanner.Err()
}
func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) {
fields := strings.Fields(line)
fieldsLen := len(fields)
if fieldsLen < minFieldsCnt {
return nil, fmt.Errorf(
"Parse Unix domain failed: expected at least %d fields but got %d",
minFieldsCnt, fieldsLen)
}
kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err)
}
users, err := u.parseUsers(fields[netUnixRefCountIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err)
}
flags, err := u.parseFlags(fields[netUnixFlagsIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err)
}
typ, err := u.parseType(fields[netUnixTypeIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err)
}
state, err := u.parseState(fields[netUnixStateIdx])
if err != nil {
return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err)
}
var inode uint64
if hasInode {
inodeStr := fields[netUnixInodeIdx]
inode, err = u.parseInode(inodeStr)
if err != nil {
return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err)
}
}
nuLine := &NetUnixLine{
KernelPtr: kernelPtr,
RefCount: users,
Type: typ,
Flags: flags,
State: state,
Inode: inode,
}
// Path field is optional.
if fieldsLen > minFieldsCnt {
pathIdx := netUnixInodeIdx + 1
if !hasInode {
pathIdx--
}
nuLine.Path = fields[pathIdx]
}
return nuLine, nil
}
func (u NetUnix) parseKernelPtr(str string) (string, error) {
if !strings.HasSuffix(str, ":") {
return "", errInvalidKernelPtrFmt
}
return str[:len(str)-1], nil
}
func (u NetUnix) parseUsers(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseProtocol(hexStr string) (uint64, error) {
return strconv.ParseUint(hexStr, 16, 32)
}
func (u NetUnix) parseType(hexStr string) (NetUnixType, error) {
typ, err := strconv.ParseUint(hexStr, 16, 16)
if err != nil {
return 0, err
}
return NetUnixType(typ), nil
}
func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) {
flags, err := strconv.ParseUint(hexStr, 16, 32)
if err != nil {
return 0, err
}
return NetUnixFlags(flags), nil
}
func (u NetUnix) parseState(hexStr string) (NetUnixState, error) {
st, err := strconv.ParseInt(hexStr, 16, 8)
if err != nil {
return 0, err
}
return NetUnixState(st), nil
}
func (u NetUnix) parseInode(inodeStr string) (uint64, error) {
return strconv.ParseUint(inodeStr, 10, 64)
}
func (t NetUnixType) String() string {
switch t {
case netUnixTypeStream:
return "stream"
case netUnixTypeDgram:
return "dgram"
case netUnixTypeSeqpacket:
return "seqpacket"
}
return "unknown"
}
func (f NetUnixFlags) String() string {
switch f {
case netUnixFlagListen:
return "listen"
default:
return "default"
}
}
func (s NetUnixState) String() string {
switch s {
case netUnixStateUnconnected:
return "unconnected"
case netUnixStateConnecting:
return "connecting"
case netUnixStateConnected:
return "connected"
case netUnixStateDisconnected:
return "disconnected"
}
return "unknown"
}
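A similarly hedged sketch of the /proc/net/unix reader (same import assumption as above); the String() methods render the numeric type and state fields:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	nu, err := procfs.NewNetUnix() // reads /proc/net/unix from the default mount point
	if err != nil {
		log.Fatal(err)
	}
	for _, row := range nu.Rows {
		// Type prints as "stream"/"dgram"/"seqpacket", State as
		// "unconnected"/"connecting"/"connected"/"disconnected".
		fmt.Printf("%s %s inode=%d path=%q\n", row.Type, row.State, row.Inode, row.Path)
	}
}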

vendor/github.com/prometheus/procfs/proc.go generated vendored
@ -1,11 +1,27 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/fs"
)
// Proc provides information about a running process.
@ -13,7 +29,7 @@ type Proc struct {
// The process ID.
PID int
fs fs.FS
}
// Procs represents a list of Proc structs.
@ -38,7 +54,7 @@ func NewProc(pid int) (Proc, error) {
if err != nil {
return Proc{}, err
}
return fs.Proc(pid)
}
// AllProcs returns a list of all currently available processes under /proc.
@ -52,28 +68,35 @@ func AllProcs() (Procs, error) {
// Self returns a process for the current process.
func (fs FS) Self() (Proc, error) {
p, err := os.Readlink(fs.proc.Path("self"))
if err != nil {
return Proc{}, err
}
pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
if err != nil {
return Proc{}, err
}
return fs.Proc(pid)
}
// NewProc returns a process for the given pid.
//
// Deprecated: use fs.Proc() instead
func (fs FS) NewProc(pid int) (Proc, error) {
return fs.Proc(pid)
}
// Proc returns a process for the given pid.
func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
return Proc{PID: pid, fs: fs.proc}, nil
}
// AllProcs returns a list of all currently available processes.
func (fs FS) AllProcs() (Procs, error) {
d, err := os.Open(fs.proc.Path())
if err != nil {
return Procs{}, err
}
@ -90,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
p = append(p, Proc{PID: int(pid), fs: fs.proc})
}
return p, nil
@ -113,7 +136,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
@ -142,6 +165,26 @@ func (p Proc) Executable() (string, error) {
return exe, err
}
// Cwd returns the absolute path to the current working directory of the process.
func (p Proc) Cwd() (string, error) {
wd, err := os.Readlink(p.path("cwd"))
if os.IsNotExist(err) {
return "", nil
}
return wd, err
}
// RootDir returns the absolute path to the process's root directory (as set by chroot)
func (p Proc) RootDir() (string, error) {
rdir, err := os.Readlink(p.path("root"))
if os.IsNotExist(err) {
return "", nil
}
return rdir, err
}
// FileDescriptors returns the currently open file descriptors of a process.
func (p Proc) FileDescriptors() ([]uintptr, error) {
names, err := p.fileDescriptors()
@ -204,6 +247,20 @@ func (p Proc) MountStats() ([]*Mount, error) {
return parseMountStats(f)
}
// MountInfo retrieves mount information for mount points in a
// process's namespace.
// It supplies information missing in `/proc/self/mounts` and
// fixes various other problems with that file too.
func (p Proc) MountInfo() ([]*MountInfo, error) {
f, err := os.Open(p.path("mountinfo"))
if err != nil {
return nil, err
}
defer f.Close()
return parseMountInfo(f)
}
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
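The net effect of the proc.go changes is that fs.Proc(pid) becomes the supported constructor while fs.NewProc(pid) merely forwards to it. A minimal sketch of the new entry points (PID 1 chosen arbitrarily; same import assumption as the earlier sketches):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(1)
	if err != nil {
		log.Fatal(err)
	}
	// Cwd returns "" if /proc/1/cwd does not exist; reading another
	// process's cwd symlink may require elevated privileges.
	cwd, err := p.Cwd()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pid 1 cwd:", cwd)
}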

Some files were not shown because too many files have changed in this diff