From 00d7c5972f6e14227fbef411f2d042b2112c0a97 Mon Sep 17 00:00:00 2001 From: Aditya C S Date: Wed, 8 Nov 2017 19:44:03 +0530 Subject: [PATCH] Add InfluxDB support for traefik metrics --- README.md | 2 +- cmd/traefik/configuration.go | 4 + docs/configuration/backends/web.md | 25 + docs/index.md | 2 +- glide.lock | 14 +- glide.yaml | 9 +- metrics/influxdb.go | 90 + metrics/influxdb_test.go | 53 + metrics/statsd.go | 12 +- metrics/statsd_test.go | 2 +- server/server.go | 5 + types/types.go | 9 +- .../VividCortex/gohistogram/LICENSE | 19 + .../VividCortex/gohistogram/histogram.go | 23 + .../gohistogram/numerichistogram.go | 160 ++ .../gohistogram/weightedhistogram.go | 190 ++ .../go-kit/kit/metrics/generic/generic.go | 218 ++ .../go-kit/kit/metrics/influx/influx.go | 255 ++ vendor/github.com/influxdata/influxdb/LICENSE | 20 + .../influxdb/LICENSE_OF_DEPENDENCIES.md | 25 + .../influxdata/influxdb/client/v2/client.go | 609 +++++ .../influxdata/influxdb/client/v2/udp.go | 112 + .../github.com/influxdata/influxdb/errors.go | 42 + .../influxdata/influxdb/influxdb.go | 6 + .../influxdata/influxdb/models/consistency.go | 48 + .../influxdata/influxdb/models/inline_fnv.go | 32 + .../influxdb/models/inline_strconv_parse.go | 38 + .../influxdata/influxdb/models/points.go | 2231 +++++++++++++++++ .../influxdata/influxdb/models/rows.go | 62 + .../influxdata/influxdb/models/statistic.go | 42 + .../influxdata/influxdb/models/time.go | 74 + vendor/github.com/influxdata/influxdb/node.go | 121 + .../influxdata/influxdb/pkg/escape/bytes.go | 111 + .../influxdata/influxdb/pkg/escape/strings.go | 21 + vendor/github.com/stvp/go-udp-testing/udp.go | 35 +- 35 files changed, 4693 insertions(+), 28 deletions(-) create mode 100644 metrics/influxdb.go create mode 100644 metrics/influxdb_test.go create mode 100644 vendor/github.com/VividCortex/gohistogram/LICENSE create mode 100644 vendor/github.com/VividCortex/gohistogram/histogram.go create mode 100644 vendor/github.com/VividCortex/gohistogram/numerichistogram.go create mode 100644 vendor/github.com/VividCortex/gohistogram/weightedhistogram.go create mode 100644 vendor/github.com/go-kit/kit/metrics/generic/generic.go create mode 100644 vendor/github.com/go-kit/kit/metrics/influx/influx.go create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE create mode 100644 vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/client.go create mode 100644 vendor/github.com/influxdata/influxdb/client/v2/udp.go create mode 100644 vendor/github.com/influxdata/influxdb/errors.go create mode 100644 vendor/github.com/influxdata/influxdb/influxdb.go create mode 100644 vendor/github.com/influxdata/influxdb/models/consistency.go create mode 100644 vendor/github.com/influxdata/influxdb/models/inline_fnv.go create mode 100644 vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go create mode 100644 vendor/github.com/influxdata/influxdb/models/points.go create mode 100644 vendor/github.com/influxdata/influxdb/models/rows.go create mode 100644 vendor/github.com/influxdata/influxdb/models/statistic.go create mode 100644 vendor/github.com/influxdata/influxdb/models/time.go create mode 100644 vendor/github.com/influxdata/influxdb/node.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go create mode 100644 vendor/github.com/influxdata/influxdb/pkg/escape/strings.go diff --git a/README.md b/README.md index 11ab9c1b0..b8c09ca96 100644 --- a/README.md +++ b/README.md @@ -66,7 
+66,7 @@ Run it and forget it! - Hot-reloading of configuration. No need to restart the process - Circuit breakers, retry - Round Robin, rebalancer load-balancers -- Metrics (Rest, Prometheus, Datadog, Statd) +- Metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB) - Clean AngularJS Web UI - Websocket, HTTP/2, GRPC ready - Access Logs (JSON, CLF) diff --git a/cmd/traefik/configuration.go b/cmd/traefik/configuration.go index 6f2404a62..0ec5ae26f 100644 --- a/cmd/traefik/configuration.go +++ b/cmd/traefik/configuration.go @@ -63,6 +63,10 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration { Address: "localhost:8125", PushInterval: "10s", }, + InfluxDB: &types.InfluxDB{ + Address: "localhost:8089", + PushInterval: "10s", + }, } // default Marathon diff --git a/docs/configuration/backends/web.md b/docs/configuration/backends/web.md index 0d88b96f8..5e0573d32 100644 --- a/docs/configuration/backends/web.md +++ b/docs/configuration/backends/web.md @@ -158,6 +158,31 @@ pushinterval = "10s" # ... ``` +### InfluxDB + +```toml +[web] +# ... + +# InfluxDB metrics exporter type +[web.metrics.influxdb] + +# InfluxDB's address. +# +# Required +# Default: "localhost:8089" +# +address = "localhost:8089" + +# InfluxDB push interval +# +# Optional +# Default: "10s" +# +pushinterval = "10s" + +# ... +``` ## Statistics diff --git a/docs/index.md b/docs/index.md index 4900aa90e..0ee4f79d9 100644 --- a/docs/index.md +++ b/docs/index.md @@ -44,7 +44,7 @@ Run it and forget it! - Hot-reloading of configuration. No need to restart the process - Circuit breakers, retry - Round Robin, rebalancer load-balancers -- Metrics (Rest, Prometheus, Datadog, Statd) +- Metrics (Rest, Prometheus, Datadog, Statsd, InfluxDB) - Clean AngularJS Web UI - Websocket, HTTP/2, GRPC ready - Access Logs (JSON, CLF) diff --git a/glide.lock b/glide.lock index 021c4e213..37d5b4c9d 100644 --- a/glide.lock +++ b/glide.lock @@ -1,4 +1,4 @@ -hash: 1d18b9c76989feed304a8781b18ea24b43e858091b775316af97bfc210a06ea0 +hash: 7fd36649e80749e16bbfa69777e0f90a017fbc2f67d7efd46373716a16b1a60a updated: 2017-11-02T11:39:20.438135-04:00 imports: - name: cloud.google.com/go @@ -250,6 +250,8 @@ imports: - log - metrics - metrics/dogstatsd + - metrics/generic + - metrics/influx - metrics/internal/lv - metrics/internal/ratemap - metrics/multi @@ -311,6 +313,12 @@ imports: version: 3959339b333561bf62a38b424fd41517c2c90f40 - name: github.com/imdario/mergo version: 3e95a51e0639b4cf372f2ccf74c86749d747fbdc +- name: github.com/influxdata/influxdb + version: 2d474a3089bcfce6b472779be9470a1f0ef3d5e4 + subpackages: + - client/v2 + - models + - pkg/escape - name: github.com/JamesClonk/vultr version: 2fd0705ce648e602e6c9c57329a174270a4f6688 subpackages: @@ -483,6 +491,8 @@ imports: version: 824e85271811af89640ea25620c67f6c2eed987e - name: github.com/urfave/negroni version: 490e6a555d47ca891a89a150d0c1ef3922dfffe9 +- name: github.com/VividCortex/gohistogram + version: 51564d9861991fb0ad0f531c99ef602d0f9866e6 - name: github.com/vulcand/oxy version: 7e9763c4dc71b9758379da3581e6495c145caaab repo: https://github.com/containous/oxy.git @@ -805,7 +815,7 @@ testImports: - libcontainer/system - libcontainer/user - name: github.com/stvp/go-udp-testing - version: 06eb4f886d9f8242b0c176cf0d3ce5ec2cedda05 + version: c4434f09ec131ecf30f986d5dcb1636508bfa49a - name: github.com/vdemeester/shakers version: 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 - name: github.com/xeipuuv/gojsonpointer diff --git a/glide.yaml b/glide.yaml index 700019e91..403d0a1b2 100644 --- 
a/glide.yaml
+++ b/glide.yaml
@@ -85,6 +85,10 @@ import:
   version: ^1.1.0
 - package: k8s.io/client-go
   version: v2.0.0
+- package: github.com/influxdata/influxdb
+  version: v1.3.7
+  subpackages:
+  - client/v2
 - package: github.com/gambol99/go-marathon
   version: dd6cbd4c2d71294a19fb89158f2a00d427f174ab
 - package: github.com/ArthurHlt/go-eureka-client
@@ -103,10 +107,13 @@ import:
   - log
   - metrics
   - metrics/dogstatsd
+  - metrics/internal/lv
+  - metrics/internal/ratemap
   - metrics/multi
   - metrics/prometheus
   - metrics/statsd
   - util/conn
+  - metrics/influx
 - package: github.com/prometheus/client_golang
   version: 08fd2e12372a66e68e30523c7642e0cbc3e4fbde
   subpackages:
@@ -221,4 +228,4 @@ testImport:
 - package: github.com/mattn/go-shellwords
 - package: github.com/vdemeester/shakers
 - package: github.com/docker/cli
-  version: d95fd2f38cfc23e077530c6181330727d561b6a0
+  version: d95fd2f38cfc23e077530c6181330727d561b6a0
\ No newline at end of file
diff --git a/metrics/influxdb.go b/metrics/influxdb.go
new file mode 100644
index 000000000..463ea2fa4
--- /dev/null
+++ b/metrics/influxdb.go
@@ -0,0 +1,90 @@
+package metrics
+
+import (
+	"bytes"
+	"time"
+
+	"github.com/containous/traefik/log"
+	"github.com/containous/traefik/safe"
+	"github.com/containous/traefik/types"
+	kitlog "github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/metrics/influx"
+	influxdb "github.com/influxdata/influxdb/client/v2"
+)
+
+var influxDBClient = influx.New(map[string]string{}, influxdb.BatchPointsConfig{}, kitlog.LoggerFunc(func(keyvals ...interface{}) error {
+	log.Info(keyvals)
+	return nil
+}))
+
+type influxDBWriter struct {
+	buf    bytes.Buffer
+	config *types.InfluxDB
+}
+
+var influxDBTicker *time.Ticker
+
+const (
+	influxDBMetricsReqsName    = "traefik.requests.total"
+	influxDBMetricsLatencyName = "traefik.request.duration"
+	influxDBRetriesTotalName   = "traefik.backend.retries.total"
+)
+
+// RegisterInfluxDB registers the metrics pusher if this didn't happen yet and creates an InfluxDB Registry instance.
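+//
+// A minimal usage sketch (the address, interval, and label values are illustrative):
+//
+//	registry := RegisterInfluxDB(&types.InfluxDB{Address: "localhost:8089", PushInterval: "10s"})
+//	registry.ReqsCounter().With("service", "test", "code", "200", "method", "GET").Add(1)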
+func RegisterInfluxDB(config *types.InfluxDB) Registry {
+	if influxDBTicker == nil {
+		influxDBTicker = initInfluxDBTicker(config)
+	}
+
+	return &standardRegistry{
+		enabled:              true,
+		reqsCounter:          influxDBClient.NewCounter(influxDBMetricsReqsName),
+		reqDurationHistogram: influxDBClient.NewHistogram(influxDBMetricsLatencyName),
+		retriesCounter:       influxDBClient.NewCounter(influxDBRetriesTotalName),
+	}
+}
+
+// initInfluxDBTicker initializes the metrics pusher and starts the loop that pushes metrics to InfluxDB.
+func initInfluxDBTicker(config *types.InfluxDB) *time.Ticker {
+	// Fall back to the default address; the writer reads it from the config.
+	if len(config.Address) == 0 {
+		config.Address = "localhost:8089"
+	}
+
+	pushInterval, err := time.ParseDuration(config.PushInterval)
+	if err != nil {
+		log.Warnf("Unable to parse %s into a push interval, using 10s as the default value", config.PushInterval)
+		pushInterval = 10 * time.Second
+	}
+
+	report := time.NewTicker(pushInterval)
+
+	safe.Go(func() {
+		var buf bytes.Buffer
+		influxDBClient.WriteLoop(report.C, &influxDBWriter{buf: buf, config: config})
+	})
+
+	return report
+}
+
+// StopInfluxDB stops the internal influxDBTicker, which controls the pushing of metrics to InfluxDB, and resets it to `nil`.
+func StopInfluxDB() {
+	if influxDBTicker != nil {
+		influxDBTicker.Stop()
+	}
+	influxDBTicker = nil
+}
+
+func (w *influxDBWriter) Write(bp influxdb.BatchPoints) error {
+	c, err := influxdb.NewUDPClient(influxdb.UDPConfig{
+		Addr: w.config.Address,
+	})
+
+	if err != nil {
+		return err
+	}
+
+	defer c.Close()
+
+	return c.Write(bp)
+}
diff --git a/metrics/influxdb_test.go b/metrics/influxdb_test.go
new file mode 100644
index 000000000..0470ccbcb
--- /dev/null
+++ b/metrics/influxdb_test.go
@@ -0,0 +1,53 @@
+package metrics
+
+import (
+	"net/http"
+	"regexp"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/containous/traefik/types"
+	"github.com/stvp/go-udp-testing"
+)
+
+func TestInfluxDB(t *testing.T) {
+	udp.SetAddr(":8089")
+	// Make sure the UDP listener waits for data a bit longer; otherwise it quits after a millisecond.
+	udp.Timeout = 5 * time.Second
+
+	influxDBRegistry := RegisterInfluxDB(&types.InfluxDB{Address: ":8089", PushInterval: "1s"})
+	defer StopInfluxDB()
+
+	if !influxDBRegistry.IsEnabled() {
+		t.Fatalf("InfluxDB registry must be enabled")
+	}
+
+	expected := []string{
+		"(traefik.requests.total,code=200,method=GET,service=test count=1) [0-9]{19}",
+		"(traefik.requests.total,code=404,method=GET,service=test count=1) [0-9]{19}",
+		"(traefik.request.duration,code=200,method=GET,service=test p50=10000,p90=10000,p95=10000,p99=10000) [0-9]{19}",
+		"(traefik.backend.retries.total,code=404,method=GET,service=test count=2) [0-9]{19}",
+	}
+
+	msg := udp.ReceiveString(t, func() {
+		influxDBRegistry.ReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusOK), "method", http.MethodGet).Add(1)
+		influxDBRegistry.ReqsCounter().With("service", "test", "code", strconv.Itoa(http.StatusNotFound), "method", http.MethodGet).Add(1)
+		influxDBRegistry.RetriesCounter().With("service", "test").Add(1)
+		influxDBRegistry.RetriesCounter().With("service", "test").Add(1)
+		influxDBRegistry.ReqDurationHistogram().With("service", "test", "code", strconv.Itoa(http.StatusOK)).Observe(10000)
+	})
+
+	extractAndMatchMessage(t, expected, msg)
+}
+
+func extractAndMatchMessage(t *testing.T, patterns []string, msg string) {
+	t.Helper()
+	for _, pattern := range patterns {
+		re := regexp.MustCompile(pattern)
+		match := re.FindStringSubmatch(msg)
+		if len(match) != 2 {
t.Errorf("Got %q %v, want %q", msg, match, pattern) + } + } +} diff --git a/metrics/statsd.go b/metrics/statsd.go index 29a56b6cd..9dd58e6f6 100644 --- a/metrics/statsd.go +++ b/metrics/statsd.go @@ -17,6 +17,12 @@ var statsdClient = statsd.New("traefik.", kitlog.LoggerFunc(func(keyvals ...inte var statsdTicker *time.Ticker +const ( + statsdMetricsReqsName = "requests.total" + statsdMetricsLatencyName = "request.duration" + statsdRetriesTotalName = "backend.retries.total" +) + // RegisterStatsd registers the metrics pusher if this didn't happen yet and creates a statsd Registry instance. func RegisterStatsd(config *types.Statsd) Registry { if statsdTicker == nil { @@ -25,9 +31,9 @@ func RegisterStatsd(config *types.Statsd) Registry { return &standardRegistry{ enabled: true, - reqsCounter: statsdClient.NewCounter(ddMetricsReqsName, 1.0), - reqDurationHistogram: statsdClient.NewTiming(ddMetricsLatencyName, 1.0), - retriesCounter: statsdClient.NewCounter(ddRetriesTotalName, 1.0), + reqsCounter: statsdClient.NewCounter(statsdMetricsReqsName, 1.0), + reqDurationHistogram: statsdClient.NewTiming(statsdMetricsLatencyName, 1.0), + retriesCounter: statsdClient.NewCounter(statsdRetriesTotalName, 1.0), } } diff --git a/metrics/statsd_test.go b/metrics/statsd_test.go index 7e8c683fc..8f23a4432 100644 --- a/metrics/statsd_test.go +++ b/metrics/statsd_test.go @@ -18,7 +18,7 @@ func TestStatsD(t *testing.T) { defer StopStatsd() if !statsdRegistry.IsEnabled() { - t.Errorf("PrometheusRegistry should return true for IsEnabled()") + t.Errorf("Statsd registry should return true for IsEnabled()") } expected := []string{ diff --git a/server/server.go b/server/server.go index e19609bbf..beda9d6e6 100644 --- a/server/server.go +++ b/server/server.go @@ -1244,6 +1244,10 @@ func (server *Server) registerMetricClients(metricsConfig *types.Metrics) { registries = append(registries, metrics.RegisterStatsd(metricsConfig.StatsD)) log.Debugf("Configured StatsD metrics pushing to %s once every %s", metricsConfig.StatsD.Address, metricsConfig.StatsD.PushInterval) } + if metricsConfig.InfluxDB != nil { + registries = append(registries, metrics.RegisterInfluxDB(metricsConfig.InfluxDB)) + log.Debugf("Configured InfluxDB metrics pushing to %s once every %s", metricsConfig.InfluxDB.Address, metricsConfig.InfluxDB.PushInterval) + } if len(registries) > 0 { server.metricsRegistry = metrics.NewMultiRegistry(registries) @@ -1253,6 +1257,7 @@ func (server *Server) registerMetricClients(metricsConfig *types.Metrics) { func stopMetricsClients() { metrics.StopDatadog() metrics.StopStatsd() + metrics.StopInfluxDB() } func (server *Server) buildRateLimiter(handler http.Handler, rlConfig *types.RateLimit) (http.Handler, error) { diff --git a/types/types.go b/types/types.go index 283eb1d19..eb731d1e2 100644 --- a/types/types.go +++ b/types/types.go @@ -368,6 +368,7 @@ type Metrics struct { Prometheus *Prometheus `description:"Prometheus metrics exporter type" export:"true"` Datadog *Datadog `description:"DataDog metrics exporter type" export:"true"` StatsD *Statsd `description:"StatsD metrics exporter type" export:"true"` + InfluxDB *InfluxDB `description:"InfluxDB metrics exporter type"` } // Prometheus can contain specific configuration used by the Prometheus Metrics exporter @@ -384,7 +385,13 @@ type Datadog struct { // Statsd contains address and metrics pushing interval configuration type Statsd struct { Address string `description:"StatsD address"` - PushInterval string `description:"DataDog push interval" export:"true"` + 
PushInterval string `description:"StatsD push interval" export:"true"` +} + +// InfluxDB contains address and metrics pushing interval configuration +type InfluxDB struct { + Address string `description:"InfluxDB address"` + PushInterval string `description:"InfluxDB push interval"` } // Buckets holds Prometheus Buckets diff --git a/vendor/github.com/VividCortex/gohistogram/LICENSE b/vendor/github.com/VividCortex/gohistogram/LICENSE new file mode 100644 index 000000000..d23fea365 --- /dev/null +++ b/vendor/github.com/VividCortex/gohistogram/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013 VividCortex + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/VividCortex/gohistogram/histogram.go b/vendor/github.com/VividCortex/gohistogram/histogram.go new file mode 100644 index 000000000..ede21fd31 --- /dev/null +++ b/vendor/github.com/VividCortex/gohistogram/histogram.go @@ -0,0 +1,23 @@ +package gohistogram + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +// Histogram is the interface that wraps the Add and Quantile methods. +type Histogram interface { + // Add adds a new value, n, to the histogram. Trimming is done + // automatically. + Add(n float64) + + // Quantile returns an approximation. + Quantile(n float64) (q float64) + + // String returns a string reprentation of the histogram, + // which is useful for printing to a terminal. + String() (str string) +} + +type bin struct { + value float64 + count float64 +} diff --git a/vendor/github.com/VividCortex/gohistogram/numerichistogram.go b/vendor/github.com/VividCortex/gohistogram/numerichistogram.go new file mode 100644 index 000000000..20dea740d --- /dev/null +++ b/vendor/github.com/VividCortex/gohistogram/numerichistogram.go @@ -0,0 +1,160 @@ +package gohistogram + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. +// Please see the LICENSE file for applicable license terms. + +import ( + "fmt" +) + +type NumericHistogram struct { + bins []bin + maxbins int + total uint64 +} + +// NewHistogram returns a new NumericHistogram with a maximum of n bins. +// +// There is no "optimal" bin count, but somewhere between 20 and 80 bins +// should be sufficient. 
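+//
+// A short usage sketch (the values are illustrative):
+//
+//	h := NewHistogram(50)
+//	h.Add(1.2)
+//	q := h.Quantile(0.99) // approximate 99th percentile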
+func NewHistogram(n int) *NumericHistogram { + return &NumericHistogram{ + bins: make([]bin, 0), + maxbins: n, + total: 0, + } +} + +func (h *NumericHistogram) Add(n float64) { + defer h.trim() + h.total++ + for i := range h.bins { + if h.bins[i].value == n { + h.bins[i].count++ + return + } + + if h.bins[i].value > n { + + newbin := bin{value: n, count: 1} + head := append(make([]bin, 0), h.bins[0:i]...) + + head = append(head, newbin) + tail := h.bins[i:] + h.bins = append(head, tail...) + return + } + } + + h.bins = append(h.bins, bin{count: 1, value: n}) +} + +func (h *NumericHistogram) Quantile(q float64) float64 { + count := q * float64(h.total) + for i := range h.bins { + count -= float64(h.bins[i].count) + + if count <= 0 { + return h.bins[i].value + } + } + + return -1 +} + +// CDF returns the value of the cumulative distribution function +// at x +func (h *NumericHistogram) CDF(x float64) float64 { + count := 0.0 + for i := range h.bins { + if h.bins[i].value <= x { + count += float64(h.bins[i].count) + } + } + + return count / float64(h.total) +} + +// Mean returns the sample mean of the distribution +func (h *NumericHistogram) Mean() float64 { + if h.total == 0 { + return 0 + } + + sum := 0.0 + + for i := range h.bins { + sum += h.bins[i].value * h.bins[i].count + } + + return sum / float64(h.total) +} + +// Variance returns the variance of the distribution +func (h *NumericHistogram) Variance() float64 { + if h.total == 0 { + return 0 + } + + sum := 0.0 + mean := h.Mean() + + for i := range h.bins { + sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) + } + + return sum / float64(h.total) +} + +func (h *NumericHistogram) Count() float64 { + return float64(h.total) +} + +// trim merges adjacent bins to decrease the bin count to the maximum value +func (h *NumericHistogram) trim() { + for len(h.bins) > h.maxbins { + // Find closest bins in terms of value + minDelta := 1e99 + minDeltaIndex := 0 + for i := range h.bins { + if i == 0 { + continue + } + + if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { + minDelta = delta + minDeltaIndex = i + } + } + + // We need to merge bins minDeltaIndex-1 and minDeltaIndex + totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count + mergedbin := bin{ + value: (h.bins[minDeltaIndex-1].value* + h.bins[minDeltaIndex-1].count + + h.bins[minDeltaIndex].value* + h.bins[minDeltaIndex].count) / + totalCount, // weighted average + count: totalCount, // summed heights + } + head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) + tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) + h.bins = append(head, tail...) + } +} + +// String returns a string reprentation of the histogram, +// which is useful for printing to a terminal. +func (h *NumericHistogram) String() (str string) { + str += fmt.Sprintln("Total:", h.total) + + for i := range h.bins { + var bar string + for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { + bar += "." + } + str += fmt.Sprintln(h.bins[i].value, "\t", bar) + } + + return +} diff --git a/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go b/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go new file mode 100644 index 000000000..16eed3719 --- /dev/null +++ b/vendor/github.com/VividCortex/gohistogram/weightedhistogram.go @@ -0,0 +1,190 @@ +// Package gohistogram contains implementations of weighted and exponential histograms. +package gohistogram + +// Copyright (c) 2013 VividCortex, Inc. All rights reserved. 
+// Please see the LICENSE file for applicable license terms. + +import "fmt" + +// A WeightedHistogram implements Histogram. A WeightedHistogram has bins that have values +// which are exponentially weighted moving averages. This allows you keep inserting large +// amounts of data into the histogram and approximate quantiles with recency factored in. +type WeightedHistogram struct { + bins []bin + maxbins int + total float64 + alpha float64 +} + +// NewWeightedHistogram returns a new WeightedHistogram with a maximum of n bins with a decay factor +// of alpha. +// +// There is no "optimal" bin count, but somewhere between 20 and 80 bins should be +// sufficient. +// +// Alpha should be set to 2 / (N+1), where N represents the average age of the moving window. +// For example, a 60-second window with an average age of 30 seconds would yield an +// alpha of 0.064516129. +func NewWeightedHistogram(n int, alpha float64) *WeightedHistogram { + return &WeightedHistogram{ + bins: make([]bin, 0), + maxbins: n, + total: 0, + alpha: alpha, + } +} + +func ewma(existingVal float64, newVal float64, alpha float64) (result float64) { + result = newVal*(1-alpha) + existingVal*alpha + return +} + +func (h *WeightedHistogram) scaleDown(except int) { + for i := range h.bins { + if i != except { + h.bins[i].count = ewma(h.bins[i].count, 0, h.alpha) + } + } +} + +func (h *WeightedHistogram) Add(n float64) { + defer h.trim() + for i := range h.bins { + if h.bins[i].value == n { + h.bins[i].count++ + + defer h.scaleDown(i) + return + } + + if h.bins[i].value > n { + + newbin := bin{value: n, count: 1} + head := append(make([]bin, 0), h.bins[0:i]...) + + head = append(head, newbin) + tail := h.bins[i:] + h.bins = append(head, tail...) + + defer h.scaleDown(i) + return + } + } + + h.bins = append(h.bins, bin{count: 1, value: n}) +} + +func (h *WeightedHistogram) Quantile(q float64) float64 { + count := q * h.total + for i := range h.bins { + count -= float64(h.bins[i].count) + + if count <= 0 { + return h.bins[i].value + } + } + + return -1 +} + +// CDF returns the value of the cumulative distribution function +// at x +func (h *WeightedHistogram) CDF(x float64) float64 { + count := 0.0 + for i := range h.bins { + if h.bins[i].value <= x { + count += float64(h.bins[i].count) + } + } + + return count / h.total +} + +// Mean returns the sample mean of the distribution +func (h *WeightedHistogram) Mean() float64 { + if h.total == 0 { + return 0 + } + + sum := 0.0 + + for i := range h.bins { + sum += h.bins[i].value * h.bins[i].count + } + + return sum / h.total +} + +// Variance returns the variance of the distribution +func (h *WeightedHistogram) Variance() float64 { + if h.total == 0 { + return 0 + } + + sum := 0.0 + mean := h.Mean() + + for i := range h.bins { + sum += (h.bins[i].count * (h.bins[i].value - mean) * (h.bins[i].value - mean)) + } + + return sum / h.total +} + +func (h *WeightedHistogram) Count() float64 { + return h.total +} + +func (h *WeightedHistogram) trim() { + total := 0.0 + for i := range h.bins { + total += h.bins[i].count + } + h.total = total + for len(h.bins) > h.maxbins { + + // Find closest bins in terms of value + minDelta := 1e99 + minDeltaIndex := 0 + for i := range h.bins { + if i == 0 { + continue + } + + if delta := h.bins[i].value - h.bins[i-1].value; delta < minDelta { + minDelta = delta + minDeltaIndex = i + } + } + + // We need to merge bins minDeltaIndex-1 and minDeltaIndex + totalCount := h.bins[minDeltaIndex-1].count + h.bins[minDeltaIndex].count + mergedbin := bin{ + value: 
(h.bins[minDeltaIndex-1].value* + h.bins[minDeltaIndex-1].count + + h.bins[minDeltaIndex].value* + h.bins[minDeltaIndex].count) / + totalCount, // weighted average + count: totalCount, // summed heights + } + head := append(make([]bin, 0), h.bins[0:minDeltaIndex-1]...) + tail := append([]bin{mergedbin}, h.bins[minDeltaIndex+1:]...) + h.bins = append(head, tail...) + } +} + +// String returns a string reprentation of the histogram, +// which is useful for printing to a terminal. +func (h *WeightedHistogram) String() (str string) { + str += fmt.Sprintln("Total:", h.total) + + for i := range h.bins { + var bar string + for j := 0; j < int(float64(h.bins[i].count)/float64(h.total)*200); j++ { + bar += "." + } + str += fmt.Sprintln(h.bins[i].value, "\t", bar) + } + + return +} diff --git a/vendor/github.com/go-kit/kit/metrics/generic/generic.go b/vendor/github.com/go-kit/kit/metrics/generic/generic.go new file mode 100644 index 000000000..9fc6e0178 --- /dev/null +++ b/vendor/github.com/go-kit/kit/metrics/generic/generic.go @@ -0,0 +1,218 @@ +// Package generic implements generic versions of each of the metric types. They +// can be embedded by other implementations, and converted to specific formats +// as necessary. +package generic + +import ( + "fmt" + "io" + "math" + "sync" + "sync/atomic" + + "github.com/VividCortex/gohistogram" + + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/internal/lv" +) + +// Counter is an in-memory implementation of a Counter. +type Counter struct { + Name string + lvs lv.LabelValues + bits uint64 +} + +// NewCounter returns a new, usable Counter. +func NewCounter(name string) *Counter { + return &Counter{ + Name: name, + } +} + +// With implements Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + bits: atomic.LoadUint64(&c.bits), + lvs: c.lvs.With(labelValues...), + } +} + +// Add implements Counter. +func (c *Counter) Add(delta float64) { + for { + var ( + old = atomic.LoadUint64(&c.bits) + newf = math.Float64frombits(old) + delta + new = math.Float64bits(newf) + ) + if atomic.CompareAndSwapUint64(&c.bits, old, new) { + break + } + } +} + +// Value returns the current value of the counter. +func (c *Counter) Value() float64 { + return math.Float64frombits(atomic.LoadUint64(&c.bits)) +} + +// ValueReset returns the current value of the counter, and resets it to zero. +// This is useful for metrics backends whose counter aggregations expect deltas, +// like Graphite. +func (c *Counter) ValueReset() float64 { + for { + var ( + old = atomic.LoadUint64(&c.bits) + newf = 0.0 + new = math.Float64bits(newf) + ) + if atomic.CompareAndSwapUint64(&c.bits, old, new) { + return math.Float64frombits(old) + } + } +} + +// LabelValues returns the set of label values attached to the counter. +func (c *Counter) LabelValues() []string { + return c.lvs +} + +// Gauge is an in-memory implementation of a Gauge. +type Gauge struct { + Name string + lvs lv.LabelValues + bits uint64 +} + +// NewGauge returns a new, usable Gauge. +func NewGauge(name string) *Gauge { + return &Gauge{ + Name: name, + } +} + +// With implements Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + bits: atomic.LoadUint64(&g.bits), + lvs: g.lvs.With(labelValues...), + } +} + +// Set implements Gauge. +func (g *Gauge) Set(value float64) { + atomic.StoreUint64(&g.bits, math.Float64bits(value)) +} + +// Value returns the current value of the gauge. 
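+//
+// A short sketch (the name and value are illustrative):
+//
+//	g := NewGauge("queue_depth")
+//	g.Set(42)
+//	v := g.Value() // v == 42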
+func (g *Gauge) Value() float64 { + return math.Float64frombits(atomic.LoadUint64(&g.bits)) +} + +// LabelValues returns the set of label values attached to the gauge. +func (g *Gauge) LabelValues() []string { + return g.lvs +} + +// Histogram is an in-memory implementation of a streaming histogram, based on +// VividCortex/gohistogram. It dynamically computes quantiles, so it's not +// suitable for aggregation. +type Histogram struct { + Name string + lvs lv.LabelValues + h gohistogram.Histogram +} + +// NewHistogram returns a numeric histogram based on VividCortex/gohistogram. A +// good default value for buckets is 50. +func NewHistogram(name string, buckets int) *Histogram { + return &Histogram{ + Name: name, + h: gohistogram.NewHistogram(buckets), + } +} + +// With implements Histogram. +func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + lvs: h.lvs.With(labelValues...), + h: h.h, + } +} + +// Observe implements Histogram. +func (h *Histogram) Observe(value float64) { + h.h.Add(value) +} + +// Quantile returns the value of the quantile q, 0.0 < q < 1.0. +func (h *Histogram) Quantile(q float64) float64 { + return h.h.Quantile(q) +} + +// LabelValues returns the set of label values attached to the histogram. +func (h *Histogram) LabelValues() []string { + return h.lvs +} + +// Print writes a string representation of the histogram to the passed writer. +// Useful for printing to a terminal. +func (h *Histogram) Print(w io.Writer) { + fmt.Fprintf(w, h.h.String()) +} + +// Bucket is a range in a histogram which aggregates observations. +type Bucket struct { + From, To, Count int64 +} + +// Quantile is a pair of a quantile (0..100) and its observed maximum value. +type Quantile struct { + Quantile int // 0..100 + Value int64 +} + +// SimpleHistogram is an in-memory implementation of a Histogram. It only tracks +// an approximate moving average, so is likely too naïve for many use cases. +type SimpleHistogram struct { + mtx sync.RWMutex + lvs lv.LabelValues + avg float64 + n uint64 +} + +// NewSimpleHistogram returns a SimpleHistogram, ready for observations. +func NewSimpleHistogram() *SimpleHistogram { + return &SimpleHistogram{} +} + +// With implements Histogram. +func (h *SimpleHistogram) With(labelValues ...string) metrics.Histogram { + return &SimpleHistogram{ + lvs: h.lvs.With(labelValues...), + avg: h.avg, + n: h.n, + } +} + +// Observe implements Histogram. +func (h *SimpleHistogram) Observe(value float64) { + h.mtx.Lock() + defer h.mtx.Unlock() + h.n++ + h.avg -= h.avg / float64(h.n) + h.avg += value / float64(h.n) +} + +// ApproximateMovingAverage returns the approximate moving average of observations. +func (h *SimpleHistogram) ApproximateMovingAverage() float64 { + h.mtx.RLock() + defer h.mtx.RUnlock() + return h.avg +} + +// LabelValues returns the set of label values attached to the histogram. +func (h *SimpleHistogram) LabelValues() []string { + return h.lvs +} diff --git a/vendor/github.com/go-kit/kit/metrics/influx/influx.go b/vendor/github.com/go-kit/kit/metrics/influx/influx.go new file mode 100644 index 000000000..0c555e116 --- /dev/null +++ b/vendor/github.com/go-kit/kit/metrics/influx/influx.go @@ -0,0 +1,255 @@ +// Package influx provides an InfluxDB implementation for metrics. The model is +// similar to other push-based instrumentation systems. Observations are +// aggregated locally and emitted to the Influx server on regular intervals. 
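+//
+// A minimal construction sketch (the tags, database name, and logger are illustrative):
+//
+//	in := influx.New(map[string]string{"host": "h1"}, influxdb.BatchPointsConfig{Database: "db"}, logger)
+//	in.NewCounter("requests").Add(1)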
+package influx + +import ( + "time" + + influxdb "github.com/influxdata/influxdb/client/v2" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/generic" + "github.com/go-kit/kit/metrics/internal/lv" +) + +// Influx is a store for metrics that will be emitted to an Influx database. +// +// Influx is a general purpose time-series database, and has no native concepts +// of counters, gauges, or histograms. Counters are modeled as a timeseries with +// one data point per flush, with a "count" field that reflects all adds since +// the last flush. Gauges are modeled as a timeseries with one data point per +// flush, with a "value" field that reflects the current state of the gauge. +// Histograms are modeled as a timeseries with one data point per combination of tags, +// with a set of quantile fields that reflects the p50, p90, p95 & p99. +// +// Influx tags are attached to the Influx object, can be given to each +// metric at construction and can be updated anytime via With function. Influx fields +// are mapped to Go kit label values directly by this collector. Actual metric +// values are provided as fields with specific names depending on the metric. +// +// All observations are collected in memory locally, and flushed on demand. +type Influx struct { + counters *lv.Space + gauges *lv.Space + histograms *lv.Space + tags map[string]string + conf influxdb.BatchPointsConfig + logger log.Logger +} + +// New returns an Influx, ready to create metrics and collect observations. Tags +// are applied to all metrics created from this object. The BatchPointsConfig is +// used during flushing. +func New(tags map[string]string, conf influxdb.BatchPointsConfig, logger log.Logger) *Influx { + return &Influx{ + counters: lv.NewSpace(), + gauges: lv.NewSpace(), + histograms: lv.NewSpace(), + tags: tags, + conf: conf, + logger: logger, + } +} + +// NewCounter returns an Influx counter. +func (in *Influx) NewCounter(name string) *Counter { + return &Counter{ + name: name, + obs: in.counters.Observe, + } +} + +// NewGauge returns an Influx gauge. +func (in *Influx) NewGauge(name string) *Gauge { + return &Gauge{ + name: name, + obs: in.gauges.Observe, + } +} + +// NewHistogram returns an Influx histogram. +func (in *Influx) NewHistogram(name string) *Histogram { + return &Histogram{ + name: name, + obs: in.histograms.Observe, + } +} + +// BatchPointsWriter captures a subset of the influxdb.Client methods necessary +// for emitting metrics observations. +type BatchPointsWriter interface { + Write(influxdb.BatchPoints) error +} + +// WriteLoop is a helper method that invokes WriteTo to the passed writer every +// time the passed channel fires. This method blocks until the channel is +// closed, so clients probably want to run it in its own goroutine. For typical +// usage, create a time.Ticker and pass its C channel to this method. +func (in *Influx) WriteLoop(c <-chan time.Time, w BatchPointsWriter) { + for range c { + if err := in.WriteTo(w); err != nil { + in.logger.Log("during", "WriteTo", "err", err) + } + } +} + +// WriteTo flushes the buffered content of the metrics to the writer, in an +// Influx BatchPoints format. WriteTo abides best-effort semantics, so +// observations are lost if there is a problem with the write. Clients should be +// sure to call WriteTo regularly, ideally through the WriteLoop helper method. 
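+//
+// A typical setup, as suggested above (the interval is illustrative):
+//
+//	t := time.NewTicker(10 * time.Second)
+//	defer t.Stop()
+//	go in.WriteLoop(t.C, w)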
+func (in *Influx) WriteTo(w BatchPointsWriter) (err error) { + bp, err := influxdb.NewBatchPoints(in.conf) + if err != nil { + return err + } + + now := time.Now() + + in.counters.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + tags := mergeTags(in.tags, lvs) + var p *influxdb.Point + fields := map[string]interface{}{"count": sum(values)} + p, err = influxdb.NewPoint(name, tags, fields, now) + if err != nil { + return false + } + bp.AddPoint(p) + return true + }) + if err != nil { + return err + } + + in.gauges.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + tags := mergeTags(in.tags, lvs) + var p *influxdb.Point + fields := map[string]interface{}{"value": last(values)} + p, err = influxdb.NewPoint(name, tags, fields, now) + if err != nil { + return false + } + bp.AddPoint(p) + return true + }) + if err != nil { + return err + } + + in.histograms.Reset().Walk(func(name string, lvs lv.LabelValues, values []float64) bool { + histogram := generic.NewHistogram(name, 50) + tags := mergeTags(in.tags, lvs) + var p *influxdb.Point + for _, v := range values { + histogram.Observe(v) + } + fields := map[string]interface{}{ + "p50": histogram.Quantile(0.50), + "p90": histogram.Quantile(0.90), + "p95": histogram.Quantile(0.95), + "p99": histogram.Quantile(0.99), + } + p, err = influxdb.NewPoint(name, tags, fields, now) + if err != nil { + return false + } + bp.AddPoint(p) + return true + }) + if err != nil { + return err + } + + return w.Write(bp) +} + +func mergeTags(tags map[string]string, labelValues []string) map[string]string { + if len(labelValues)%2 != 0 { + panic("mergeTags received a labelValues with an odd number of strings") + } + for i := 0; i < len(labelValues); i += 2 { + tags[labelValues[i]] = labelValues[i+1] + } + return tags +} + +func sum(a []float64) float64 { + var v float64 + for _, f := range a { + v += f + } + return v +} + +func last(a []float64) float64 { + return a[len(a)-1] +} + +type observeFunc func(name string, lvs lv.LabelValues, value float64) + +// Counter is an Influx counter. Observations are forwarded to an Influx +// object, and aggregated (summed) per timeseries. +type Counter struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Counter. +func (c *Counter) With(labelValues ...string) metrics.Counter { + return &Counter{ + name: c.name, + lvs: c.lvs.With(labelValues...), + obs: c.obs, + } +} + +// Add implements metrics.Counter. +func (c *Counter) Add(delta float64) { + c.obs(c.name, c.lvs, delta) +} + +// Gauge is an Influx gauge. Observations are forwarded to a Dogstatsd +// object, and aggregated (the last observation selected) per timeseries. +type Gauge struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Gauge. +func (g *Gauge) With(labelValues ...string) metrics.Gauge { + return &Gauge{ + name: g.name, + lvs: g.lvs.With(labelValues...), + obs: g.obs, + } +} + +// Set implements metrics.Gauge. +func (g *Gauge) Set(value float64) { + g.obs(g.name, g.lvs, value) +} + +// Histogram is an Influx histrogram. Observations are aggregated into a +// generic.Histogram and emitted as per-quantile gauges to the Influx server. +type Histogram struct { + name string + lvs lv.LabelValues + obs observeFunc +} + +// With implements metrics.Histogram. 
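+//
+// A short sketch (the name and labels are illustrative):
+//
+//	h := in.NewHistogram("request.duration")
+//	h.With("code", "200").Observe(0.25)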
+func (h *Histogram) With(labelValues ...string) metrics.Histogram { + return &Histogram{ + name: h.name, + lvs: h.lvs.With(labelValues...), + obs: h.obs, + } +} + +// Observe implements metrics.Histogram. +func (h *Histogram) Observe(value float64) { + h.obs(h.name, h.lvs, value) +} diff --git a/vendor/github.com/influxdata/influxdb/LICENSE b/vendor/github.com/influxdata/influxdb/LICENSE new file mode 100644 index 000000000..63cef79ba --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013-2016 Errplane Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md new file mode 100644 index 000000000..949a7b3c8 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/LICENSE_OF_DEPENDENCIES.md @@ -0,0 +1,25 @@ +# List +- bootstrap 3.3.5 [MIT LICENSE](https://github.com/twbs/bootstrap/blob/master/LICENSE) +- collectd.org [ISC LICENSE](https://github.com/collectd/go-collectd/blob/master/LICENSE) +- github.com/BurntSushi/toml [WTFPL LICENSE](https://github.com/BurntSushi/toml/blob/master/COPYING) +- github.com/bmizerany/pat [MIT LICENSE](https://github.com/bmizerany/pat#license) +- github.com/boltdb/bolt [MIT LICENSE](https://github.com/boltdb/bolt/blob/master/LICENSE) +- github.com/cespare/xxhash [MIT LICENSE](https://github.com/cespare/xxhash/blob/master/LICENSE.txt) +- github.com/clarkduvall/hyperloglog [MIT LICENSE](https://github.com/clarkduvall/hyperloglog/blob/master/LICENSE) +- github.com/davecgh/go-spew/spew [ISC LICENSE](https://github.com/davecgh/go-spew/blob/master/LICENSE) +- github.com/dgrijalva/jwt-go [MIT LICENSE](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE) +- github.com/dgryski/go-bits [MIT LICENSE](https://github.com/dgryski/go-bits/blob/master/LICENSE) +- github.com/dgryski/go-bitstream [MIT LICENSE](https://github.com/dgryski/go-bitstream/blob/master/LICENSE) +- github.com/gogo/protobuf/proto [BSD LICENSE](https://github.com/gogo/protobuf/blob/master/LICENSE) +- github.com/golang/snappy [BSD LICENSE](https://github.com/golang/snappy/blob/master/LICENSE) +- github.com/google/go-cmp [BSD LICENSE](https://github.com/google/go-cmp/blob/master/LICENSE) +- github.com/influxdata/usage-client [MIT LICENSE](https://github.com/influxdata/usage-client/blob/master/LICENSE.txt) +- github.com/jwilder/encoding [MIT LICENSE](https://github.com/jwilder/encoding/blob/master/LICENSE) +- 
github.com/paulbellamy/ratecounter [MIT LICENSE](https://github.com/paulbellamy/ratecounter/blob/master/LICENSE) +- github.com/peterh/liner [MIT LICENSE](https://github.com/peterh/liner/blob/master/COPYING) +- github.com/rakyll/statik [APACHE LICENSE](https://github.com/rakyll/statik/blob/master/LICENSE) +- github.com/retailnext/hllpp [BSD LICENSE](https://github.com/retailnext/hllpp/blob/master/LICENSE) +- github.com/uber-go/atomic [MIT LICENSE](https://github.com/uber-go/atomic/blob/master/LICENSE.txt) +- github.com/uber-go/zap [MIT LICENSE](https://github.com/uber-go/zap/blob/master/LICENSE.txt) +- golang.org/x/crypto [BSD LICENSE](https://github.com/golang/crypto/blob/master/LICENSE) +- jquery 2.1.4 [MIT LICENSE](https://github.com/jquery/jquery/blob/master/LICENSE.txt) diff --git a/vendor/github.com/influxdata/influxdb/client/v2/client.go b/vendor/github.com/influxdata/influxdb/client/v2/client.go new file mode 100644 index 000000000..7a057c13c --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/client.go @@ -0,0 +1,609 @@ +// Package client (v2) is the current official Go client for InfluxDB. +package client // import "github.com/influxdata/influxdb/client/v2" + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/influxdata/influxdb/models" +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. +type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". + Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. 
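+//
+// A minimal sketch (the address is illustrative):
+//
+//	c, err := NewHTTPClient(HTTPConfig{Addr: "http://localhost:8086"})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()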
+func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + u := c.url + u.Path = "ping" + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = fmt.Errorf(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. +type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to syncronise access to url. + url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. + Points() []*Point + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. 
+ RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) +} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. +func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. +func (p *Point) PrecisionString(precison string) string { + return p.pt.PrecisionString(precison) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return string(p.pt.Name()) +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. 
+func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + for _, p := range bp.Points() { + if _, err := b.WriteString(p.pt.PrecisionString(bp.Precision())); err != nil { + return err + } + + if err := b.WriteByte('\n'); err != nil { + return err + } + } + + u := c.url + u.Path = "write" + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return fmt.Errorf(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return fmt.Errorf(result.Err) + } + } + return nil +} + +// Message represents a user message. +type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. 
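+//
+// A minimal sketch (the query text and database are illustrative):
+//
+//	q := NewQuery("SELECT count(value) FROM cpu", "mydb", "ns")
+//	if resp, err := c.Query(q); err == nil && resp.Error() == nil {
+//		// use resp.Results
+//	}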
+func (c *client) Query(q Query) (*Response, error) { + u := c.url + u.Path = "query" + + jsonParameters, err := json.Marshal(q.Parameters) + + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + params.Set("params", string(jsonParameters)) + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", + resp.StatusCode) + } + return &response, nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. 
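+		// (Editor's note: draining through the duplexReader also copies
+		// the remaining bytes into r.buf, so the error built below can
+		// include the server's full message.)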
+ io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} diff --git a/vendor/github.com/influxdata/influxdb/client/v2/udp.go b/vendor/github.com/influxdata/influxdb/client/v2/udp.go new file mode 100644 index 000000000..779a28b33 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/client/v2/udp.go @@ -0,0 +1,112 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. +func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/vendor/github.com/influxdata/influxdb/errors.go b/vendor/github.com/influxdata/influxdb/errors.go new file mode 100644 index 000000000..9bc6b9988 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/errors.go @@ -0,0 +1,42 @@ +package influxdb + +import ( + "errors" + "fmt" + "strings" +) + +// ErrFieldTypeConflict is returned when a new field already exists with a +// different type. 
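+//
+// Editor's note (illustrative, not from the vendored source): writing
+// `cpu value=1i` after `cpu value=1.0` changes the field "value" from float
+// to integer, and the server rejects the second write with this error.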
+var ErrFieldTypeConflict = errors.New("field type conflict") + +// ErrDatabaseNotFound indicates that a database operation failed on the +// specified database because the specified database does not exist. +func ErrDatabaseNotFound(name string) error { return fmt.Errorf("database not found: %s", name) } + +// ErrRetentionPolicyNotFound indicates that the named retention policy could +// not be found in the database. +func ErrRetentionPolicyNotFound(name string) error { + return fmt.Errorf("retention policy not found: %s", name) +} + +// IsAuthorizationError indicates whether an error is due to an authorization failure +func IsAuthorizationError(err error) bool { + e, ok := err.(interface { + AuthorizationFailed() bool + }) + return ok && e.AuthorizationFailed() +} + +// IsClientError indicates whether an error is a known client error. +func IsClientError(err error) bool { + if err == nil { + return false + } + + if strings.HasPrefix(err.Error(), ErrFieldTypeConflict.Error()) { + return true + } + + return false +} diff --git a/vendor/github.com/influxdata/influxdb/influxdb.go b/vendor/github.com/influxdata/influxdb/influxdb.go new file mode 100644 index 000000000..a59417507 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/influxdb.go @@ -0,0 +1,6 @@ +// Package influxdb is the root package of InfluxDB, +// the scalable datastore for metrics, events, and real-time analytics. +// +// If you're looking for the Go HTTP client for InfluxDB, +// see package github.com/influxdata/influxdb/client/v2. +package influxdb // import "github.com/influxdata/influxdb" diff --git a/vendor/github.com/influxdata/influxdb/models/consistency.go b/vendor/github.com/influxdata/influxdb/models/consistency.go new file mode 100644 index 000000000..2a3269bca --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/consistency.go @@ -0,0 +1,48 @@ +package models + +import ( + "errors" + "strings" +) + +// ConsistencyLevel represent a required replication criteria before a write can +// be returned as successful. +// +// The consistency level is handled in open-source InfluxDB but only applicable to clusters. +type ConsistencyLevel int + +const ( + // ConsistencyLevelAny allows for hinted handoff, potentially no write happened yet. + ConsistencyLevelAny ConsistencyLevel = iota + + // ConsistencyLevelOne requires at least one data node acknowledged a write. + ConsistencyLevelOne + + // ConsistencyLevelQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyLevelQuorum + + // ConsistencyLevelAll requires all data nodes to acknowledge a write. + ConsistencyLevelAll +) + +var ( + // ErrInvalidConsistencyLevel is returned when parsing the string version + // of a consistency level. + ErrInvalidConsistencyLevel = errors.New("invalid consistency level") +) + +// ParseConsistencyLevel converts a consistency level string to the corresponding ConsistencyLevel const. 
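+//
+// For example (editor's sketch):
+//
+//	lvl, err := ParseConsistencyLevel("quorum") // ConsistencyLevelQuorum, nil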
+func ParseConsistencyLevel(level string) (ConsistencyLevel, error) { + switch strings.ToLower(level) { + case "any": + return ConsistencyLevelAny, nil + case "one": + return ConsistencyLevelOne, nil + case "quorum": + return ConsistencyLevelQuorum, nil + case "all": + return ConsistencyLevelAll, nil + default: + return 0, ErrInvalidConsistencyLevel + } +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_fnv.go b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go new file mode 100644 index 000000000..eec1ae8b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/influxdata/influxdb/models" + +// from stdlib hash/fnv/fnv.go +const ( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go new file mode 100644 index 000000000..dcc8ae402 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/inline_strconv_parse.go @@ -0,0 +1,38 @@ +package models // import "github.com/influxdata/influxdb/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. +func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. +func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go new file mode 100644 index 000000000..b2d234811 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -0,0 +1,2231 @@ +// Package models implements basic objects used throughout the TICK stack. 
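+//
+// Editor's note: a point in InfluxDB line protocol looks like the following
+// (illustrative values):
+//
+//	cpu,host=serverA,region=us-west value=0.64 1434055562000000000
+//
+// i.e. a measurement, comma-separated tags, fields, and an optional timestamp.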
+package models // import "github.com/influxdata/influxdb/models"
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/influxdata/influxdb/pkg/escape"
+)
+
+var (
+	measurementEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+	}
+
+	tagEscapeCodes = map[byte][]byte{
+		',': []byte(`\,`),
+		' ': []byte(`\ `),
+		'=': []byte(`\=`),
+	}
+
+	// ErrPointMustHaveAField is returned when operating on a point that does not have any fields.
+	ErrPointMustHaveAField = errors.New("point without fields is unsupported")
+
+	// ErrInvalidNumber is returned when a number is expected but not provided.
+	ErrInvalidNumber = errors.New("invalid number")
+
+	// ErrInvalidPoint is returned when a point cannot be parsed correctly.
+	ErrInvalidPoint = errors.New("point is invalid")
+)
+
+const (
+	// MaxKeyLength is the largest allowed size of the combined measurement and tag keys.
+	MaxKeyLength = 65535
+)
+
+// Point defines the values that will be written to the database.
+type Point interface {
+	// Name returns the measurement name for the point.
+	Name() []byte
+
+	// SetName updates the measurement name for the point.
+	SetName(string)
+
+	// Tags returns the tag set for the point.
+	Tags() Tags
+
+	// AddTag adds or replaces a tag value for a point.
+	AddTag(key, value string)
+
+	// SetTags replaces the tags for the point.
+	SetTags(tags Tags)
+
+	// HasTag returns true if the tag exists for the point.
+	HasTag(tag []byte) bool
+
+	// Fields returns the fields for the point.
+	Fields() (Fields, error)
+
+	// Time returns the timestamp for the point.
+	Time() time.Time
+
+	// SetTime updates the timestamp for the point.
+	SetTime(t time.Time)
+
+	// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch.
+	UnixNano() int64
+
+	// HashID returns a non-cryptographic checksum of the point's key.
+	HashID() uint64
+
+	// Key returns the key (measurement joined with tags) of the point.
+	Key() []byte
+
+	// String returns a string representation of the point. If there is a
+	// timestamp associated with the point then it will be specified with the default
+	// precision of nanoseconds.
+	String() string
+
+	// MarshalBinary returns a binary representation of the point.
+	MarshalBinary() ([]byte, error)
+
+	// PrecisionString returns a string representation of the point. If there
+	// is a timestamp associated with the point then it will be specified in the
+	// given unit.
+	PrecisionString(precision string) string
+
+	// RoundedString returns a string representation of the point. If there
+	// is a timestamp associated with the point, then it will be rounded to the
+	// given duration.
+	RoundedString(d time.Duration) string
+
+	// Split will attempt to return multiple points with the same timestamp whose
+	// string representations are no longer than size. Points with a single field or
+	// a point without a timestamp may exceed the requested size.
+	Split(size int) []Point
+
+	// Round will round the timestamp of the point to the given duration.
+	Round(d time.Duration)
+
+	// StringSize returns the length of the string that would be returned by String().
+	StringSize() int
+
+	// AppendString appends the result of String() to the provided buffer and returns
+	// the result, potentially reducing string allocations.
+	AppendString(buf []byte) []byte
+
+	// FieldIterator returns a FieldIterator that can be used to traverse the
+	// fields of a point without constructing the in-memory map.
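+	//
+	// Editor's note: a typical iteration, as an illustrative sketch (not
+	// part of the vendored source):
+	//
+	//	it := p.FieldIterator()
+	//	for it.Next() {
+	//		fmt.Println(string(it.FieldKey()), it.Type())
+	//	}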
+	FieldIterator() FieldIterator
+}
+
+// FieldType represents the type of a field.
+type FieldType int
+
+const (
+	// Integer indicates the field's type is integer.
+	Integer FieldType = iota
+
+	// Float indicates the field's type is float.
+	Float
+
+	// Boolean indicates the field's type is boolean.
+	Boolean
+
+	// String indicates the field's type is string.
+	String
+
+	// Empty is used to indicate that there is no field.
+	Empty
+)
+
+// FieldIterator provides a low-allocation interface to iterate through a point's fields.
+type FieldIterator interface {
+	// Next indicates whether there are any fields remaining.
+	Next() bool
+
+	// FieldKey returns the key of the current field.
+	FieldKey() []byte
+
+	// Type returns the FieldType of the current field.
+	Type() FieldType
+
+	// StringValue returns the string value of the current field.
+	StringValue() string
+
+	// IntegerValue returns the integer value of the current field.
+	IntegerValue() (int64, error)
+
+	// BooleanValue returns the boolean value of the current field.
+	BooleanValue() (bool, error)
+
+	// FloatValue returns the float value of the current field.
+	FloatValue() (float64, error)
+
+	// Reset resets the iterator to its initial state.
+	Reset()
+}
+
+// Points represents a sortable list of points by timestamp.
+type Points []Point
+
+// Len implements sort.Interface.
+func (a Points) Len() int { return len(a) }
+
+// Less implements sort.Interface.
+func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) }
+
+// Swap implements sort.Interface.
+func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// point is the default implementation of Point.
+type point struct {
+	time time.Time
+
+	// text encoding of measurement and tags
+	// key must always be stored sorted by tags; if the original line was not sorted,
+	// we need to resort it
+	key []byte
+
+	// text encoding of field data
+	fields []byte
+
+	// text encoding of timestamp
+	ts []byte
+
+	// cached version of parsed fields from data
+	cachedFields map[string]interface{}
+
+	// cached version of parsed name from key
+	cachedName string
+
+	// cached version of parsed tags
+	cachedTags Tags
+
+	it fieldIterator
+}
+
+const (
+	// the number of characters for the largest possible int64 (9223372036854775807)
+	maxInt64Digits = 19
+
+	// the number of characters for the smallest possible int64 (-9223372036854775808)
+	minInt64Digits = 20
+
+	// the number of characters required for the largest float64 before a range check
+	// would occur during parsing
+	maxFloat64Digits = 25
+
+	// the number of characters required for the smallest float64 before a range check
+	// would occur during parsing
+	minFloat64Digits = 27
+)
+
+// ParsePoints returns a slice of Points from a text representation of a point
+// with each point separated by newlines. If any points fail to parse, a non-nil error
+// will be returned in addition to the points that parsed successfully.
+func ParsePoints(buf []byte) ([]Point, error) {
+	return ParsePointsWithPrecision(buf, time.Now().UTC(), "n")
+}
+
+// ParsePointsString is identical to ParsePoints but accepts a string.
+func ParsePointsString(buf string) ([]Point, error) {
+	return ParsePoints([]byte(buf))
+}
+
+// ParseKey returns the measurement name and tags from a point.
+//
+// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
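+//
+// For example (editor's sketch):
+//
+//	name, tags := ParseKey([]byte(`cpu,host=serverA`))
+//	// name == "cpu"; tags hold host=serverA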
+func ParseKey(buf []byte) (string, Tags) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+
+	var tags Tags
+	if state == tagKeyState {
+		tags = parseTags(buf)
+		// scanMeasurement returns the location of the comma if there are tags, strip that off
+		return string(buf[:i-1]), tags
+	}
+	return string(buf[:i]), tags
+}
+
+func ParseTags(buf []byte) (Tags, error) {
+	return parseTags(buf), nil
+}
+
+func ParseName(buf []byte) ([]byte, error) {
+	// Ignore the error because scanMeasurement returns "missing fields" which we ignore
+	// when just parsing a key
+	state, i, _ := scanMeasurement(buf, 0)
+	if state == tagKeyState {
+		return buf[:i-1], nil
+	}
+	return buf[:i], nil
+}
+
+// ParsePointsWithPrecision is similar to ParsePoints, but allows the
+// caller to provide a precision for time.
+//
+// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf.
+// This can have the unintended effect of preventing buf from being garbage collected.
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) {
+	points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1)
+	var (
+		pos    int
+		block  []byte
+		failed []string
+	)
+	for pos < len(buf) {
+		pos, block = scanLine(buf, pos)
+		pos++
+
+		if len(block) == 0 {
+			continue
+		}
+
+		// lines which start with '#' are comments
+		start := skipWhitespace(block, 0)
+
+		// If line is all whitespace, just skip it
+		if start >= len(block) {
+			continue
+		}
+
+		if block[start] == '#' {
+			continue
+		}
+
+		// strip the newline if one is present
+		if block[len(block)-1] == '\n' {
+			block = block[:len(block)-1]
+		}
+
+		pt, err := parsePoint(block[start:], defaultTime, precision)
+		if err != nil {
+			failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err))
+		} else {
+			points = append(points, pt)
+		}
+
+	}
+	if len(failed) > 0 {
+		return points, fmt.Errorf("%s", strings.Join(failed, "\n"))
+	}
+	return points, nil
+
+}
+
+func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) {
+	// scan the first block which is measurement[,tag1=value1,tag2=value2...]
+	pos, key, err := scanKey(buf, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// measurement name is required
+	if len(key) == 0 {
+		return nil, fmt.Errorf("missing measurement")
+	}
+
+	if len(key) > MaxKeyLength {
+		return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength)
+	}
+
+	// scan the second block which is field1=value1[,field2=value2,...]
+	pos, fields, err := scanFields(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	// at least one field is required
+	if len(fields) == 0 {
+		return nil, fmt.Errorf("missing fields")
+	}
+
+	var maxKeyErr error
+	walkFields(fields, func(k, v []byte) bool {
+		if sz := seriesKeySize(key, k); sz > MaxKeyLength {
+			maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength)
+			return false
+		}
+		return true
+	})
+
+	if maxKeyErr != nil {
+		return nil, maxKeyErr
+	}
+
+	// scan the last block which is an optional integer timestamp
+	pos, ts, err := scanTime(buf, pos)
+	if err != nil {
+		return nil, err
+	}
+
+	pt := &point{
+		key:    key,
+		fields: fields,
+		ts:     ts,
+	}
+
+	if len(ts) == 0 {
+		pt.time = defaultTime
+		pt.SetPrecision(precision)
+	} else {
+		ts, err := parseIntBytes(ts, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		pt.time, err = SafeCalcTime(ts, precision)
+		if err != nil {
+			return nil, err
+		}
+
+		// Determine if there are illegal non-whitespace characters after the
+		// timestamp block.
+		for pos < len(buf) {
+			if buf[pos] != ' ' {
+				return nil, ErrInvalidPoint
+			}
+			pos++
+		}
+	}
+	return pt, nil
+}
+
+// GetPrecisionMultiplier will return a multiplier for the precision specified.
+func GetPrecisionMultiplier(precision string) int64 {
+	d := time.Nanosecond
+	switch precision {
+	case "u":
+		d = time.Microsecond
+	case "ms":
+		d = time.Millisecond
+	case "s":
+		d = time.Second
+	case "m":
+		d = time.Minute
+	case "h":
+		d = time.Hour
+	}
+	return int64(d)
+}
+
+// scanKey scans buf starting at i for the measurement and tag portion of the point.
+// It returns the ending position and the byte slice of key within buf. If there
+// are tags, they will be sorted if they are not already.
+func scanKey(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+
+	i = start
+
+	// Determines whether the tags are sorted; assume they are.
+	sorted := true
+
+	// indices holds the indexes within buf of the start of each tag. For example,
+	// a buf of 'cpu,host=a,region=b,zone=c' would have an indices slice of [4,11,20],
+	// which indicates that the first tag starts at buf[4], the second at buf[11], and
+	// the last at buf[20]
+	indices := make([]int, 100)
+
+	// tracks how many commas we've seen so we know how many values in indices
+	// are in use. Since indices is an arbitrarily large slice, we need to know
+	// how many of its entries are meaningful.
+	commas := 0
+
+	// First scan the Point's measurement.
+	state, i, err := scanMeasurement(buf, i)
+	if err != nil {
+		return i, buf[start:i], err
+	}
+
+	// Optionally scan tags if needed.
+	if state == tagKeyState {
+		i, commas, indices, err = scanTags(buf, i, indices)
+		if err != nil {
+			return i, buf[start:i], err
+		}
+	}
+
+	// Now we know where the key region is within buf, and the location of tags, we
+	// need to determine if duplicate tags exist and if the tags are sorted. This iterates
+	// over the list comparing each tag in the sequence with each other.
+	for j := 0; j < commas-1; j++ {
+		// get the left and right tags
+		_, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=')
+		_, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=')
+
+		// If left is greater than right, the tags are not sorted. We do not have to
+		// continue because the short path no longer works.
+		// If the tags are equal, then there are duplicate tags, and we should abort.
+		// If the tags are not sorted, this pass may not find duplicate tags and we
+		// need to do a more exhaustive search later.
+ if cmp := bytes.Compare(left, right); cmp > 0 { + sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") + } + } + + // If the tags are not sorted, then sort them. This sort is inline and + // uses the tag indices we created earlier. The actual buffer is not sorted, the + // indices are using the buffer for value comparison. After the indices are sorted, + // the buffer is reconstructed from the sorted indices. + if !sorted && commas > 0 { + // Get the measurement name for later + measurement := buf[start : indices[0]-1] + + // Sort the indices + indices := indices[:commas] + insertionSort(0, commas, buf, indices) + + // Create a new key using the measurement and sorted indices + b := make([]byte, len(buf[start:i])) + pos := copy(b, measurement) + for _, i := range indices { + b[pos] = ',' + pos++ + _, v := scanToSpaceOr(buf, i, ',') + pos += copy(b[pos:], v) + } + + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + + return i, b, nil + } + + return i, buf[start:i], nil +} + +// The following constants allow us to specify which state to move to +// next, when scanning sections of a Point. +const ( + tagKeyState = iota + tagValueState + fieldsState +) + +// scanMeasurement examines the measurement part of a Point, returning +// the next state to move to, and the current location in the buffer. +func scanMeasurement(buf []byte, i int) (int, int, error) { + // Check first byte of measurement, anything except a comma is fine. + // It can't be a space, since whitespace is stripped prior to this + // function call. + if i >= len(buf) || buf[i] == ',' { + return -1, i, fmt.Errorf("missing measurement") + } + + for { + i++ + if i >= len(buf) { + // cpu + return -1, i, fmt.Errorf("missing fields") + } + + if buf[i-1] == '\\' { + // Skip character (it's escaped). + continue + } + + // Unescaped comma; move onto scanning the tags. + if buf[i] == ',' { + return tagKeyState, i + 1, nil + } + + // Unescaped space; move onto scanning the fields. + if buf[i] == ' ' { + // cpu value=1.0 + return fieldsState, i, nil + } + } +} + +// scanTags examines all the tags in a Point, keeping track of and +// returning the updated indices slice, number of commas and location +// in buf where to start examining the Point fields. +func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { + var ( + err error + commas int + state = tagKeyState + ) + + for { + switch state { + case tagKeyState: + // Grow our indices slice if we have too many tags. + if commas >= len(indices) { + newIndics := make([]int, cap(indices)*2) + copy(newIndics, indices) + indices = newIndics + } + indices[commas] = i + commas++ + + i, err = scanTagsKey(buf, i) + state = tagValueState // tag value always follows a tag key + case tagValueState: + state, i, err = scanTagsValue(buf, i) + case fieldsState: + indices[commas] = i + 1 + return i, commas, indices, nil + } + + if err != nil { + return i, commas, indices, err + } + } +} + +// scanTagsKey scans each character in a tag key. 
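+//
+// Editor's note: for input `cpu,host=serverA value=1`, with i pointing at
+// 'h', it returns the index just past the unescaped '=' (illustrative).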
+func scanTagsKey(buf []byte, i int) (int, error) { + // First character of the key. + if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { + // cpu,{'', ' ', ',', '='} + return i, fmt.Errorf("missing tag key") + } + + // Examine each character in the tag key until we hit an unescaped + // equals (the tag value), or we hit an error (i.e., unescaped + // space or comma). + for { + i++ + + // Either we reached the end of the buffer or we hit an + // unescaped comma or space. + if i >= len(buf) || + ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { + // cpu,tag{'', ' ', ','} + return i, fmt.Errorf("missing tag value") + } + + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag= + return i + 1, nil + } + } +} + +// scanTagsValue scans each character in a tag value. +func scanTagsValue(buf []byte, i int) (int, int, error) { + // Tag value cannot be empty. + if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { + // cpu,tag={',', ' '} + return -1, i, fmt.Errorf("missing tag value") + } + + // Examine each character in the tag value until we hit an unescaped + // comma (move onto next tag key), an unescaped space (move onto + // fields), or we error out. + for { + i++ + if i >= len(buf) { + // cpu,tag=value + return -1, i, fmt.Errorf("missing fields") + } + + // An unescaped equals sign is an invalid tag value. + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag={'=', 'fo=o'} + return -1, i, fmt.Errorf("invalid tag format") + } + + if buf[i] == ',' && buf[i-1] != '\\' { + // cpu,tag=foo, + return tagKeyState, i + 1, nil + } + + // cpu,tag=foo value=1.0 + // cpu, tag=foo\= value=1.0 + if buf[i] == ' ' && buf[i-1] != '\\' { + return fieldsState, i, nil + } + } +} + +func insertionSort(l, r int, buf []byte, indices []int) { + for i := l + 1; i < r; i++ { + for j := i; j > l && less(buf, indices, j, j-1); j-- { + indices[j], indices[j-1] = indices[j-1], indices[j] + } + } +} + +func less(buf []byte, indices []int, i, j int) bool { + // This grabs the tag names for i & j, it ignores the values + _, a := scanTo(buf, indices[i], '=') + _, b := scanTo(buf, indices[j], '=') + return bytes.Compare(a, b) < 0 +} + +// scanFields scans buf, starting at i for the fields section of a point. It returns +// the ending position and the byte slice of the fields within buf. +func scanFields(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + quoted := false + + // tracks how many '=' we've seen + equals := 0 + + // tracks how many commas we've seen + commas := 0 + + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // escaped characters? + if buf[i] == '\\' && i+1 < len(buf) { + i += 2 + continue + } + + // If the value is quoted, scan until we get to the end quote + // Only quote values in the field value since quotes are not significant + // in the field key + if buf[i] == '"' && equals > commas { + quoted = !quoted + i++ + continue + } + + // If we see an =, ensure that there is at least on char before and after it + if buf[i] == '=' && !quoted { + equals++ + + // check for "... =123" but allow "a\ =123" + if buf[i-1] == ' ' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "...a=123,=456" but allow "a=123,a\,=456" + if buf[i-1] == ',' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "... value=" + if i+1 >= len(buf) { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + // check for "... value=,value2=..." 
+			if buf[i+1] == ',' || buf[i+1] == ' ' {
+				return i, buf[start:i], fmt.Errorf("missing field value")
+			}
+
+			if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' {
+				var err error
+				i, err = scanNumber(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+			// If next byte is not a double-quote, the value must be a boolean
+			if buf[i+1] != '"' {
+				var err error
+				i, _, err = scanBoolean(buf, i+1)
+				if err != nil {
+					return i, buf[start:i], err
+				}
+				continue
+			}
+		}
+
+		if buf[i] == ',' && !quoted {
+			commas++
+		}
+
+		// reached end of block?
+		if buf[i] == ' ' && !quoted {
+			break
+		}
+		i++
+	}
+
+	if quoted {
+		return i, buf[start:i], fmt.Errorf("unbalanced quotes")
+	}
+
+	// check that all field sections had keys and values (e.g. prevent "a=1,b")
+	if equals == 0 || commas != equals-1 {
+		return i, buf[start:i], fmt.Errorf("invalid field format")
+	}
+
+	return i, buf[start:i], nil
+}
+
+// scanTime scans buf, starting at i for the time section of a point. It
+// returns the ending position and the byte slice of the timestamp within buf,
+// and an error if the timestamp is not in the correct numeric format.
+func scanTime(buf []byte, i int) (int, []byte, error) {
+	start := skipWhitespace(buf, i)
+	i = start
+
+	for {
+		// reached the end of buf?
+		if i >= len(buf) {
+			break
+		}
+
+		// Reached end of block or trailing whitespace?
+		if buf[i] == '\n' || buf[i] == ' ' {
+			break
+		}
+
+		// Handle negative timestamps
+		if i == start && buf[i] == '-' {
+			i++
+			continue
+		}
+
+		// Timestamps should be integers; make sure they are, so we don't need
+		// to actually parse the timestamp until needed.
+		if buf[i] < '0' || buf[i] > '9' {
+			return i, buf[start:i], fmt.Errorf("bad timestamp")
+		}
+		i++
+	}
+	return i, buf[start:i], nil
+}
+
+func isNumeric(b byte) bool {
+	return (b >= '0' && b <= '9') || b == '.'
+}
+
+// scanNumber returns the end position within buf, starting at i, after
+// scanning over buf for an integer or float. It returns an
+// error if an invalid number is scanned.
+func scanNumber(buf []byte, i int) (int, error) {
+	start := i
+	var isInt bool
+
+	// Is negative number?
+	if i < len(buf) && buf[i] == '-' {
+		i++
+		// There must be more characters now, as just '-' is illegal.
+		if i == len(buf) {
+			return i, ErrInvalidNumber
+		}
+	}
+
+	// tracks whether we've seen a decimal point
+	decimal := false
+
+	// indicates the number is a float in scientific notation
+	scientific := false
+
+	for {
+		if i >= len(buf) {
+			break
+		}
+
+		if buf[i] == ',' || buf[i] == ' ' {
+			break
+		}
+
+		if buf[i] == 'i' && i > start && !isInt {
+			isInt = true
+			i++
+			continue
+		}
+
+		if buf[i] == '.' {
+			// Can't have more than 1 decimal (e.g. 1.1.1 should fail)
+			if decimal {
+				return i, ErrInvalidNumber
+			}
+			decimal = true
+		}
+
+		// `e` is valid for floats but not as the first char
+		if i > start && (buf[i] == 'e' || buf[i] == 'E') {
+			scientific = true
+			i++
+			continue
+		}
+
+		// + and - are only valid at this point if they follow an e (scientific notation)
+		if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') {
+			i++
+			continue
+		}
+
+		// NaN is an unsupported value
+		if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') {
+			return i, ErrInvalidNumber
+		}
+
+		if !isNumeric(buf[i]) {
+			return i, ErrInvalidNumber
+		}
+		i++
+	}
+
+	if isInt && (decimal || scientific) {
+		return i, ErrInvalidNumber
+	}
+
+	numericDigits := i - start
+	if isInt {
+		numericDigits--
+	}
+	if decimal {
+		numericDigits--
+	}
+	if buf[start] == '-' {
+		numericDigits--
+	}
+
+	if numericDigits == 0 {
+		return i, ErrInvalidNumber
+	}
+
+	// It's more common that numbers will be within min/max range for their type, but we need to prevent
+	// out-of-range numbers from being parsed successfully. This uses some simple heuristics to decide
+	// if we should parse the number to the actual type. It does not do it all the time because it incurs
+	// extra allocations and we end up converting the type again when writing points to disk.
+	if isInt {
+		// Make sure the last char is an 'i' for integers (e.g. 9i10 is not valid)
+		if buf[i-1] != 'i' {
+			return i, ErrInvalidNumber
+		}
+		// Parse the int to check bounds; the number of digits could be larger than the max range.
+		// We subtract 1 from the index to remove the `i` from our tests.
+		if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits {
+			if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil {
+				return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err)
+			}
+		}
+	} else {
+		// Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range
+		if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits {
+			if _, err := parseFloatBytes(buf[start:i], 64); err != nil {
+				return i, fmt.Errorf("invalid float")
+			}
+		}
+	}
+
+	return i, nil
+}
+
+// scanBoolean returns the end position within buf, starting at i, after
+// scanning over buf for a boolean. Valid values for a boolean are
+// t, T, true, True, TRUE, f, F, false, False, FALSE. It returns an error
+// if an invalid boolean is scanned.
+func scanBoolean(buf []byte, i int) (int, []byte, error) { + start := i + + if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + i++ + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + i++ + } + + // Single char bool (t, T, f, F) is ok + if i-start == 1 { + return i, buf[start:i], nil + } + + // length must be 4 for true or TRUE + if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // length must be 5 for false or FALSE + if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // Otherwise + valid := false + switch buf[start] { + case 't': + valid = bytes.Equal(buf[start:i], []byte("true")) + case 'f': + valid = bytes.Equal(buf[start:i], []byte("false")) + case 'T': + valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) + case 'F': + valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) + } + + if !valid { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + return i, buf[start:i], nil + +} + +// skipWhitespace returns the end position within buf, starting at i after +// scanning over spaces in tags. +func skipWhitespace(buf []byte, i int) int { + for i < len(buf) { + if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { + break + } + i++ + } + return i +} + +// scanLine returns the end position in buf and the next line found within +// buf. +func scanLine(buf []byte, i int) (int, []byte) { + start := i + quoted := false + fields := false + + // tracks how many '=' and commas we've seen + // this duplicates some of the functionality in scanFields + equals := 0 + commas := 0 + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // skip past escaped characters + if buf[i] == '\\' { + i += 2 + continue + } + + if buf[i] == ' ' { + fields = true + } + + // If we see a double quote, makes sure it is not escaped + if fields { + if !quoted && buf[i] == '=' { + i++ + equals++ + continue + } else if !quoted && buf[i] == ',' { + i++ + commas++ + continue + } else if buf[i] == '"' && equals > commas { + i++ + quoted = !quoted + continue + } + } + + if buf[i] == '\n' && !quoted { + break + } + + i++ + } + + return i, buf[start:i] +} + +// scanTo returns the end position in buf and the next consecutive block +// of bytes, starting from i and ending with stop byte, where stop byte +// has not been escaped. +// +// If there are leading spaces, they are skipped. +func scanTo(buf []byte, i int, stop byte) (int, []byte) { + start := i + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // Reached unescaped stop value? + if buf[i] == stop && (i == 0 || buf[i-1] != '\\') { + break + } + i++ + } + + return i, buf[start:i] +} + +// scanTo returns the end position in buf and the next consecutive block +// of bytes, starting from i and ending with stop byte. If there are leading +// spaces, they are skipped. +func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) { + start := i + if buf[i] == stop || buf[i] == ' ' { + return i, buf[start:i] + } + + for { + i++ + if buf[i-1] == '\\' { + continue + } + + // reached the end of buf? + if i >= len(buf) { + return i, buf[start:i] + } + + // reached end of block? 
+ if buf[i] == stop || buf[i] == ' ' { + return i, buf[start:i] + } + } +} + +func scanTagValue(buf []byte, i int) (int, []byte) { + start := i + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' && buf[i-1] != '\\' { + break + } + i++ + } + if i > len(buf) { + return i, nil + } + return i, buf[start:i] +} + +func scanFieldValue(buf []byte, i int) (int, []byte) { + start := i + quoted := false + for i < len(buf) { + // Only escape char for a field value is a double-quote and backslash + if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') { + i += 2 + continue + } + + // Quoted value? (e.g. string) + if buf[i] == '"' { + i++ + quoted = !quoted + continue + } + + if buf[i] == ',' && !quoted { + break + } + i++ + } + return i, buf[start:i] +} + +func escapeMeasurement(in []byte) []byte { + for b, esc := range measurementEscapeCodes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +func unescapeMeasurement(in []byte) []byte { + for b, esc := range measurementEscapeCodes { + in = bytes.Replace(in, esc, []byte{b}, -1) + } + return in +} + +func escapeTag(in []byte) []byte { + for b, esc := range tagEscapeCodes { + if bytes.IndexByte(in, b) != -1 { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + } + return in +} + +func unescapeTag(in []byte) []byte { + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + for b, esc := range tagEscapeCodes { + if bytes.IndexByte(in, b) != -1 { + in = bytes.Replace(in, esc, []byte{b}, -1) + } + } + return in +} + +// escapeStringFieldReplacer replaces double quotes and backslashes +// with the same character preceded by a backslash. +// As of Go 1.7 this benchmarked better in allocations and CPU time +// compared to iterating through a string byte-by-byte and appending to a new byte slice, +// calling strings.Replace twice, and better than (*Regex).ReplaceAllString. +var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`) + +// EscapeStringField returns a copy of in with any double quotes or +// backslashes with escaped values. +func EscapeStringField(in string) string { + return escapeStringFieldReplacer.Replace(in) +} + +// unescapeStringField returns a copy of in with any escaped double-quotes +// or backslashes unescaped. +func unescapeStringField(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + + var out []byte + i := 0 + for { + if i >= len(in) { + break + } + // unescape backslashes + if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' { + out = append(out, '\\') + i += 2 + continue + } + // unescape double-quotes + if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' { + out = append(out, '"') + i += 2 + continue + } + out = append(out, in[i]) + i++ + + } + return string(out) +} + +// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN) or out of range time is passed, this function returns an error. +func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) { + key, err := pointKey(name, tags, fields, t) + if err != nil { + return nil, err + } + + return &point{ + key: key, + time: t, + fields: fields.MarshalBinary(), + }, nil +} + +// pointKey checks some basic requirements for valid points, and returns the +// key, along with an possible error. 
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { + if len(fields) == 0 { + return nil, ErrPointMustHaveAField + } + + if !t.IsZero() { + if err := CheckTime(t); err != nil { + return nil, err + } + } + + for key, value := range fields { + switch value := value.(type) { + case float64: + // Ensure the caller validates and handles invalid field values + if math.IsNaN(value) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + case float32: + // Ensure the caller validates and handles invalid field values + if math.IsNaN(float64(value)) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + } + if len(key) == 0 { + return nil, fmt.Errorf("all fields must have non-empty names") + } + } + + key := MakeKey([]byte(measurement), tags) + for field := range fields { + sz := seriesKeySize(key, []byte(field)) + if sz > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) + } + } + + return key, nil +} + +func seriesKeySize(key, field []byte) int { + // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular + // dependency. + return len(key) + 4 + len(field) +} + +// NewPointFromBytes returns a new Point from a marshalled Point. +func NewPointFromBytes(b []byte) (Point, error) { + p := &point{} + if err := p.UnmarshalBinary(b); err != nil { + return nil, err + } + + // This does some basic validation to ensure there are fields and they + // can be unmarshalled as well. + iter := p.FieldIterator() + var hasField bool + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + hasField = true + switch iter.Type() { + case Float: + _, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Integer: + _, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case String: + // Skip since this won't return an error + case Boolean: + _, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + } + } + + if !hasField { + return nil, ErrPointMustHaveAField + } + + return p, nil +} + +// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN) is passed, this function panics. +func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { + pt, err := NewPoint(name, tags, fields, time) + if err != nil { + panic(err.Error()) + } + return pt +} + +// Key returns the key (measurement joined with tags) of the point. +func (p *point) Key() []byte { + return p.key +} + +func (p *point) name() []byte { + _, name := scanTo(p.key, 0, ',') + return name +} + +func (p *point) Name() []byte { + return escape.Unescape(p.name()) +} + +// SetName updates the measurement name for the point. +func (p *point) SetName(name string) { + p.cachedName = "" + p.key = MakeKey([]byte(name), p.Tags()) +} + +// Time return the timestamp for the point. +func (p *point) Time() time.Time { + return p.time +} + +// SetTime updates the timestamp for the point. +func (p *point) SetTime(t time.Time) { + p.time = t +} + +// Round will round the timestamp of the point to the given duration. +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + +// Tags returns the tag set for the point. 
+func (p *point) Tags() Tags { + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key) + return p.cachedTags +} + +func (p *point) HasTag(tag []byte) bool { + if len(p.key) == 0 { + return false + } + + var exists bool + walkTags(p.key, func(key, value []byte) bool { + if bytes.Equal(tag, key) { + exists = true + return false + } + return true + }) + + return exists +} + +func walkTags(buf []byte, fn func(key, value []byte) bool) { + if len(buf) == 0 { + return + } + + pos, name := scanTo(buf, 0, ',') + + // it's an empty key, so there are no tags + if len(name) == 0 { + return + } + + hasEscape := bytes.IndexByte(buf, '\\') != -1 + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) + + if len(value) == 0 { + continue + } + + if hasEscape { + if !fn(unescapeTag(key), unescapeTag(value)) { + return + } + } else { + if !fn(key, value) { + return + } + } + + i++ + } +} + +// walkFields walks each field key and value via fn. If fn returns false, the iteration +// is stopped. The values are the raw byte slices and not the converted types. +func walkFields(buf []byte, fn func(key, value []byte) bool) { + var i int + var key, val []byte + for len(buf) > 0 { + i, key = scanTo(buf, 0, '=') + buf = buf[i+1:] + i, val = scanFieldValue(buf, 0) + buf = buf[i:] + if !fn(key, val) { + break + } + + // slice off comma + if len(buf) > 0 { + buf = buf[1:] + } + } +} + +func parseTags(buf []byte) Tags { + if len(buf) == 0 { + return nil + } + + tags := make(Tags, 0, bytes.Count(buf, []byte(","))) + walkTags(buf, func(key, value []byte) bool { + tags = append(tags, NewTag(key, value)) + return true + }) + return tags +} + +// MakeKey creates a key for a set of tags. +func MakeKey(name []byte, tags Tags) []byte { + // unescape the name and then re-escape it to avoid double escaping. + // The key should always be stored in escaped form. + return append(escapeMeasurement(unescapeMeasurement(name)), tags.HashKey()...) +} + +// SetTags replaces the tags for the point. +func (p *point) SetTags(tags Tags) { + p.key = MakeKey(p.Name(), tags) + p.cachedTags = tags +} + +// AddTag adds or replaces a tag value for a point. +func (p *point) AddTag(key, value string) { + tags := p.Tags() + tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) + sort.Sort(tags) + p.cachedTags = tags + p.key = MakeKey(p.Name(), tags) +} + +// Fields returns the fields for the point. +func (p *point) Fields() (Fields, error) { + if p.cachedFields != nil { + return p.cachedFields, nil + } + cf, err := p.unmarshalBinary() + if err != nil { + return nil, err + } + p.cachedFields = cf + return p.cachedFields, nil +} + +// SetPrecision will round a time to the specified precision. +func (p *point) SetPrecision(precision string) { + switch precision { + case "n": + case "u": + p.SetTime(p.Time().Truncate(time.Microsecond)) + case "ms": + p.SetTime(p.Time().Truncate(time.Millisecond)) + case "s": + p.SetTime(p.Time().Truncate(time.Second)) + case "m": + p.SetTime(p.Time().Truncate(time.Minute)) + case "h": + p.SetTime(p.Time().Truncate(time.Hour)) + } +} + +// String returns the string representation of the point. +func (p *point) String() string { + if p.Time().IsZero() { + return string(p.Key()) + " " + string(p.fields) + } + return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) +} + +// AppendString appends the string representation of the point to buf. 
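+//
+// Editor's note: reusing one buffer across points avoids allocations
+// (illustrative sketch):
+//
+//	buf = buf[:0]
+//	buf = p.AppendString(buf)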
+func (p *point) AppendString(buf []byte) []byte { + buf = append(buf, p.key...) + buf = append(buf, ' ') + buf = append(buf, p.fields...) + + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). +func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. +func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + + tb, err := p.time.MarshalBinary() + if err != nil { + return nil, err + } + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) + return b, nil +} + +// UnmarshalBinary decodes a binary representation of the point into a point struct. +func (p *point) UnmarshalBinary(b []byte) error { + var n int + + // Read key length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read key. + if len(b) < n { + return io.ErrShortBuffer + } + p.key, b = b[:n], b[n:] + + // Read fields length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read fields. + if len(b) < n { + return io.ErrShortBuffer + } + p.fields, b = b[:n], b[n:] + + // Read timestamp. + if err := p.time.UnmarshalBinary(b); err != nil { + return err + } + return nil +} + +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. +func (p *point) PrecisionString(precision string) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.UnixNano()/GetPrecisionMultiplier(precision)) +} + +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. 
+func (p *point) RoundedString(d time.Duration) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.time.Round(d).UnixNano()) +} + +func (p *point) unmarshalBinary() (Fields, error) { + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + v, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Integer: + v, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + v, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + } + } + return fields, nil +} + +// HashID returns a non-cryptographic checksum of the point's key. +func (p *point) HashID() uint64 { + h := NewInlineFNV64a() + h.Write(p.key) + sum := h.Sum64() + return sum +} + +// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. +func (p *point) UnixNano() int64 { + return p.Time().UnixNano() +} + +// Split will attempt to return multiple points with the same timestamp whose +// string representations are no longer than size. Points with a single field or +// a point without a timestamp may exceed the requested size. +func (p *point) Split(size int) []Point { + if p.time.IsZero() || len(p.String()) <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// NewTag returns a new Tag. +func NewTag(key, value []byte) Tag { + return Tag{ + Key: key, + Value: value, + } +} + +// Size returns the size of the key and value. +func (t Tag) Size() int { return len(t.Key) + len(t.Value) } + +// Clone returns a shallow copy of Tag. +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (t Tag) Clone() Tag { + other := Tag{ + Key: make([]byte, len(t.Key)), + Value: make([]byte, len(t.Value)), + } + + copy(other.Key, t.Key) + copy(other.Value, t.Value) + + return other +} + +// String returns the string reprsentation of the tag. +func (t *Tag) String() string { + var buf bytes.Buffer + buf.WriteByte('{') + buf.WriteString(string(t.Key)) + buf.WriteByte(' ') + buf.WriteString(string(t.Value)) + buf.WriteByte('}') + return buf.String() +} + +// Tags represents a sorted list of tags. 
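+//
+// Editor's note: Tags are kept sorted by key; construct them from a map via
+// NewTags (illustrative):
+//
+//	tags := NewTags(map[string]string{"region": "us-west", "host": "serverA"})
+//	// order after construction: host=serverA, region=us-west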
+type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, NewTag([]byte(k), []byte(v))) + } + sort.Sort(a) + return a +} + +// String returns the string representation of the tags. +func (a Tags) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := range a { + buf.WriteString(a[i].String()) + if i < len(a)-1 { + buf.WriteByte(' ') + } + } + buf.WriteByte(']') + return buf.String() +} + +// Size returns the number of bytes needed to store all tags. Note, this is +// the number of bytes needed to store all keys and values and does not account +// for data structures or delimiters for example. +func (a Tags) Size() int { + var total int + for _, t := range a { + total += t.Size() + } + return total +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (a Tags) Clone() Tags { + if len(a) == 0 { + return nil + } + + others := make(Tags, len(a)) + for i := range a { + others[i] = a[i].Clone() + } + + return others +} + +func (a Tags) Len() int { return len(a) } +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Equal returns true if a equals other. +func (a Tags) Equal(other Tags) bool { + if len(a) != len(other) { + return false + } + for i := range a { + if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { + return false + } + } + return true +} + +// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. +func CompareTags(a, b Tags) int { + // Compare each key & value until a mismatch. + for i := 0; i < len(a) && i < len(b); i++ { + if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { + return cmp + } + if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { + return cmp + } + } + + // If all tags are equal up to this point then return shorter tagset. + if len(a) < len(b) { + return -1 + } else if len(a) > len(b) { + return 1 + } + + // All tags are equal. + return 0 +} + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. + + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. +func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + (*a)[i].Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Delete removes a tag by key. +func (a *Tags) Delete(key []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + copy((*a)[i:], (*a)[i+1:]) + (*a)[len(*a)-1] = Tag{} + *a = (*a)[:len(*a)-1] + return + } + } +} + +// Map returns a map representation of the tags. 
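
// A minimal sketch of the accessors and mutators above, assuming the
// vendored models package (Set keeps the slice sorted; Delete compacts it):
//
//	tags := models.NewTags(map[string]string{"host": "web-1", "region": "eu"})
//	tags.Set([]byte("region"), []byte("us"))
//	fmt.Println(tags.GetString("region")) // us
//	tags.Delete([]byte("host"))
//	fmt.Println(tags.Map())               // map[region:us]
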
+func (a Tags) Map() map[string]string {
+	m := make(map[string]string, len(a))
+	for _, t := range a {
+		m[string(t.Key)] = string(t.Value)
+	}
+	return m
+}
+
+// Merge merges the tags, combining the two. If both define a tag with the
+// same key, the merged value overwrites the old value.
+// A new map is returned.
+func (a Tags) Merge(other map[string]string) Tags {
+	merged := make(map[string]string, len(a)+len(other))
+	for _, t := range a {
+		merged[string(t.Key)] = string(t.Value)
+	}
+	for k, v := range other {
+		merged[k] = v
+	}
+	return NewTags(merged)
+}
+
+// HashKey hashes all of a tag's keys.
+func (a Tags) HashKey() []byte {
+	// Empty maps marshal to empty bytes.
+	if len(a) == 0 {
+		return nil
+	}
+
+	// Type invariant: Tags are sorted
+
+	escaped := make(Tags, 0, len(a))
+	sz := 0
+	for _, t := range a {
+		ek := escapeTag(t.Key)
+		ev := escapeTag(t.Value)
+
+		if len(ev) > 0 {
+			escaped = append(escaped, Tag{Key: ek, Value: ev})
+			sz += len(ek) + len(ev)
+		}
+	}
+
+	sz += len(escaped) + (len(escaped) * 2) // separators
+
+	// Generate marshaled bytes.
+	b := make([]byte, sz)
+	buf := b
+	idx := 0
+	for _, k := range escaped {
+		buf[idx] = ','
+		idx++
+		copy(buf[idx:idx+len(k.Key)], k.Key)
+		idx += len(k.Key)
+		buf[idx] = '='
+		idx++
+		copy(buf[idx:idx+len(k.Value)], k.Value)
+		idx += len(k.Value)
+	}
+	return b[:idx]
+}
+
+// CopyTags returns a shallow copy of tags.
+func CopyTags(a Tags) Tags {
+	other := make(Tags, len(a))
+	copy(other, a)
+	return other
+}
+
+// DeepCopyTags returns a deep copy of tags.
+func DeepCopyTags(a Tags) Tags {
+	// Calculate size of keys/values in bytes.
+	var n int
+	for _, t := range a {
+		n += len(t.Key) + len(t.Value)
+	}
+
+	// Build single allocation for all key/values.
+	buf := make([]byte, n)
+
+	// Copy tags to new set.
+	other := make(Tags, len(a))
+	for i, t := range a {
+		copy(buf, t.Key)
+		other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):]
+
+		copy(buf, t.Value)
+		other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):]
+	}
+
+	return other
+}
+
+// Fields represents a mapping between a Point's field names and their
+// values.
+type Fields map[string]interface{}
+
+// FieldIterator returns a FieldIterator that can be used to traverse the
+// fields of a point without constructing the in-memory map.
+func (p *point) FieldIterator() FieldIterator {
+	p.Reset()
+	return p
+}
+
+type fieldIterator struct {
+	start, end  int
+	key, keybuf []byte
+	valueBuf    []byte
+	fieldType   FieldType
+}
+
+// Next indicates whether there are any fields remaining.
+func (p *point) Next() bool {
+	p.it.start = p.it.end
+	if p.it.start >= len(p.fields) {
+		return false
+	}
+
+	p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=')
+	if escape.IsEscaped(p.it.key) {
+		p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key)
+		p.it.key = p.it.keybuf
+	}
+
+	p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1)
+	p.it.end++
+
+	if len(p.it.valueBuf) == 0 {
+		p.it.fieldType = Empty
+		return true
+	}
+
+	c := p.it.valueBuf[0]
+
+	if c == '"' {
+		p.it.fieldType = String
+		return true
+	}
+
+	if strings.IndexByte(`0123456789-.nNiI`, c) >= 0 {
+		if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' {
+			p.it.fieldType = Integer
+			p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1]
+		} else {
+			p.it.fieldType = Float
+		}
+		return true
+	}
+
+	// to keep the same behavior that currently exists, default to boolean
+	p.it.fieldType = Boolean
+	return true
+}
+
+// FieldKey returns the key of the current field.
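
// A minimal sketch of driving the iterator above, assuming pt is a parsed
// models.Point; the typed accessors used here are defined just below:
//
//	it := pt.FieldIterator()
//	for it.Next() {
//		switch it.Type() {
//		case models.Integer:
//			v, _ := it.IntegerValue()
//			fmt.Printf("%s=%di\n", it.FieldKey(), v)
//		case models.Float:
//			v, _ := it.FloatValue()
//			fmt.Printf("%s=%g\n", it.FieldKey(), v)
//		case models.String:
//			fmt.Printf("%s=%q\n", it.FieldKey(), it.StringValue())
//		case models.Boolean:
//			v, _ := it.BooleanValue()
//			fmt.Printf("%s=%t\n", it.FieldKey(), v)
//		}
//	}
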
+func (p *point) FieldKey() []byte {
+	return p.it.key
+}
+
+// Type returns the FieldType of the current field.
+func (p *point) Type() FieldType {
+	return p.it.fieldType
+}
+
+// StringValue returns the string value of the current field.
+func (p *point) StringValue() string {
+	return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1]))
+}
+
+// IntegerValue returns the integer value of the current field.
+func (p *point) IntegerValue() (int64, error) {
+	n, err := parseIntBytes(p.it.valueBuf, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err)
+	}
+	return n, nil
+}
+
+// BooleanValue returns the boolean value of the current field.
+func (p *point) BooleanValue() (bool, error) {
+	b, err := parseBoolBytes(p.it.valueBuf)
+	if err != nil {
+		return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err)
+	}
+	return b, nil
+}
+
+// FloatValue returns the float value of the current field.
+func (p *point) FloatValue() (float64, error) {
+	f, err := parseFloatBytes(p.it.valueBuf, 64)
+	if err != nil {
+		return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err)
+	}
+	return f, nil
+}
+
+// Reset resets the iterator to its initial state.
+func (p *point) Reset() {
+	p.it.fieldType = Empty
+	p.it.key = nil
+	p.it.valueBuf = nil
+	p.it.start = 0
+	p.it.end = 0
+}
+
+// MarshalBinary encodes all the fields to their proper type and returns the binary
+// representation
+// NOTE: uint64 is specifically not supported due to potential overflow when we decode
+// again later to an int64
+// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted...
+func (p Fields) MarshalBinary() []byte {
+	var b []byte
+	keys := make([]string, 0, len(p))
+
+	for k := range p {
+		keys = append(keys, k)
+	}
+
+	// Not really necessary, can probably be removed.
+	sort.Strings(keys)
+
+	for i, k := range keys {
+		if i > 0 {
+			b = append(b, ',')
+		}
+		b = appendField(b, k, p[k])
+	}
+
+	return b
+}
+
+func appendField(b []byte, k string, v interface{}) []byte {
+	b = append(b, []byte(escape.String(k))...)
+	b = append(b, '=')
+
+	// check popular types first
+	switch v := v.(type) {
+	case float64:
+		b = strconv.AppendFloat(b, v, 'f', -1, 64)
+	case int64:
+		b = strconv.AppendInt(b, v, 10)
+		b = append(b, 'i')
+	case string:
+		b = append(b, '"')
+		b = append(b, []byte(EscapeStringField(v))...)
+		b = append(b, '"')
+	case bool:
+		b = strconv.AppendBool(b, v)
+	case int32:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int16:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int8:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case int:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint32:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint16:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case uint8:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	// TODO: 'uint' should be considered just as "dangerous" as a uint64,
+	// perhaps the value should be checked and capped at MaxInt64? We could
+	// then include uint64 as an accepted value
+	case uint:
+		b = strconv.AppendInt(b, int64(v), 10)
+		b = append(b, 'i')
+	case float32:
+		b = strconv.AppendFloat(b, float64(v), 'f', -1, 32)
+	case []byte:
+		b = append(b, v...)
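
	// A minimal sketch of the suffix rules this switch implements, assuming
	// the vendored models package: integers gain a trailing 'i', strings are
	// quoted and escaped, floats and bools are bare, and keys come out sorted:
	//
	//	f := models.Fields{"count": int64(3), "msg": `say "hi"`, "ok": true, "temp": 21.5}
	//	fmt.Println(string(f.MarshalBinary()))
	//	// count=3i,msg="say \"hi\"",ok=true,temp=21.5
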
+	case nil:
+		// skip
+	default:
+		// Can't determine the type, so convert to string
+		b = append(b, '"')
+		b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...)
+		b = append(b, '"')
+
+	}
+
+	return b
+}
+
+type byteSlices [][]byte
+
+func (a byteSlices) Len() int           { return len(a) }
+func (a byteSlices) Less(i, j int) bool { return bytes.Compare(a[i], a[j]) == -1 }
+func (a byteSlices) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/rows.go b/vendor/github.com/influxdata/influxdb/models/rows.go
new file mode 100644
index 000000000..c087a4882
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/rows.go
@@ -0,0 +1,62 @@
+package models
+
+import (
+	"sort"
+)
+
+// Row represents a single row returned from the execution of a statement.
+type Row struct {
+	Name    string            `json:"name,omitempty"`
+	Tags    map[string]string `json:"tags,omitempty"`
+	Columns []string          `json:"columns,omitempty"`
+	Values  [][]interface{}   `json:"values,omitempty"`
+	Partial bool              `json:"partial,omitempty"`
+}
+
+// SameSeries returns true if r contains values for the same series as o.
+func (r *Row) SameSeries(o *Row) bool {
+	return r.tagsHash() == o.tagsHash() && r.Name == o.Name
+}
+
+// tagsHash returns a hash of tag key/value pairs.
+func (r *Row) tagsHash() uint64 {
+	h := NewInlineFNV64a()
+	keys := r.tagsKeys()
+	for _, k := range keys {
+		h.Write([]byte(k))
+		h.Write([]byte(r.Tags[k]))
+	}
+	return h.Sum64()
+}
+
+// tagsKeys returns a sorted list of tag keys.
+func (r *Row) tagsKeys() []string {
+	a := make([]string, 0, len(r.Tags))
+	for k := range r.Tags {
+		a = append(a, k)
+	}
+	sort.Strings(a)
+	return a
+}
+
+// Rows represents a collection of rows. Rows implements sort.Interface.
+type Rows []*Row
+
+// Len implements sort.Interface.
+func (p Rows) Len() int { return len(p) }
+
+// Less implements sort.Interface.
+func (p Rows) Less(i, j int) bool {
+	// Sort by name first.
+	if p[i].Name != p[j].Name {
+		return p[i].Name < p[j].Name
+	}
+
+	// Sort by tag set hash. Tags don't have a meaningful sort order so we
+	// just compute a hash and sort by that instead. This allows the tests
+	// to receive rows in a predictable order every time.
+	return p[i].tagsHash() < p[j].tagsHash()
+}
+
+// Swap implements sort.Interface.
+func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/influxdata/influxdb/models/statistic.go b/vendor/github.com/influxdata/influxdb/models/statistic.go
new file mode 100644
index 000000000..553e9d09f
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/statistic.go
@@ -0,0 +1,42 @@
+package models
+
+// Statistic is the representation of a statistic used by the monitoring service.
+type Statistic struct {
+	Name   string                 `json:"name"`
+	Tags   map[string]string      `json:"tags"`
+	Values map[string]interface{} `json:"values"`
+}
+
+// NewStatistic returns an initialized Statistic.
+func NewStatistic(name string) Statistic {
+	return Statistic{
+		Name:   name,
+		Tags:   make(map[string]string),
+		Values: make(map[string]interface{}),
+	}
+}
+
+// StatisticTags is a map that can be merged with others without causing
+// mutations to either map.
+type StatisticTags map[string]string
+
+// Merge creates a new map containing the merged contents of tags and t.
+// If both tags and the receiver map contain the same key, the value in tags
+// is used in the resulting map.
+//
+// Merge always returns a usable map.
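
// A minimal sketch of the precedence described above, assuming the vendored
// models package (values in the argument win over values in the receiver):
//
//	t := models.StatisticTags{"engine": "tsm1", "path": "/var/lib/influxdb"}
//	out := t.Merge(map[string]string{"path": "/tmp"})
//	// out: map[engine:tsm1 path:/tmp]
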
+func (t StatisticTags) Merge(tags map[string]string) map[string]string {
+	// Add everything in tags to the result.
+	out := make(map[string]string, len(tags))
+	for k, v := range tags {
+		out[k] = v
+	}
+
+	// Only add values from t that don't appear in tags.
+	for k, v := range t {
+		if _, ok := tags[k]; !ok {
+			out[k] = v
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/time.go b/vendor/github.com/influxdata/influxdb/models/time.go
new file mode 100644
index 000000000..e98f2cb33
--- /dev/null
+++ b/vendor/github.com/influxdata/influxdb/models/time.go
@@ -0,0 +1,74 @@
+package models
+
+// Helper time methods since parsing time can easily overflow and we only support a
+// specific time range.
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+const (
+	// MinNanoTime is the minimum time that can be represented.
+	//
+	// 1677-09-21 00:12:43.145224194 +0000 UTC
+	//
+	// The two lowest minimum integers are used as sentinel values. The
+	// minimum value needs to be used as a value lower than any other value for
+	// comparisons and another separate value is needed to act as a sentinel
+	// default value that is unusable by the user, but usable internally.
+	// Because these two values need to be used for a special purpose, we do
+	// not allow users to write points at these two times.
+	MinNanoTime = int64(math.MinInt64) + 2
+
+	// MaxNanoTime is the maximum time that can be represented.
+	//
+	// 2262-04-11 23:47:16.854775806 +0000 UTC
+	//
+	// The highest time represented by a nanosecond needs to be used for an
+	// exclusive range in the shard group, so the maximum time needs to be one
+	// less than the possible maximum number of nanoseconds representable by an
+	// int64 so that we don't lose a point at that one time.
+	MaxNanoTime = int64(math.MaxInt64) - 1
+)
+
+var (
+	minNanoTime = time.Unix(0, MinNanoTime).UTC()
+	maxNanoTime = time.Unix(0, MaxNanoTime).UTC()
+
+	// ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch.
+	ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime)
+)
+
+// SafeCalcTime safely calculates the time given. Will return error if the time is outside the
+// supported range.
+func SafeCalcTime(timestamp int64, precision string) (time.Time, error) {
+	mult := GetPrecisionMultiplier(precision)
+	if t, ok := safeSignedMult(timestamp, mult); ok {
+		tme := time.Unix(0, t).UTC()
+		return tme, CheckTime(tme)
+	}
+
+	return time.Time{}, ErrTimeOutOfRange
+}
+
+// CheckTime checks that a time is within the safe range.
+func CheckTime(t time.Time) error {
+	if t.Before(minNanoTime) || t.After(maxNanoTime) {
+		return ErrTimeOutOfRange
+	}
+	return nil
+}
+
+// Perform the multiplication and check to make sure it didn't overflow.
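
// A minimal sketch of SafeCalcTime and the range check, assuming the
// vendored models package (the epoch values are illustrative):
//
//	t, err := models.SafeCalcTime(1510147200, "s")    // 2017-11-08 13:20:00 UTC, err == nil
//	_, err = models.SafeCalcTime(math.MaxInt64, "ms") // multiplication overflows: models.ErrTimeOutOfRange
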
+func safeSignedMult(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == MinNanoTime || b == MaxNanoTime { + return 0, false + } + c := a * b + return c, c/b == a +} diff --git a/vendor/github.com/influxdata/influxdb/node.go b/vendor/github.com/influxdata/influxdb/node.go new file mode 100644 index 000000000..68709edc3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/node.go @@ -0,0 +1,121 @@ +package influxdb + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" +) + +const ( + nodeFile = "node.json" + oldNodeFile = "id" + peersFilename = "peers.json" +) + +type Node struct { + path string + ID uint64 +} + +// LoadNode will load the node information from disk if present +func LoadNode(path string) (*Node, error) { + // Always check to see if we are upgrading first + if err := upgradeNodeFile(path); err != nil { + return nil, err + } + + n := &Node{ + path: path, + } + + f, err := os.Open(filepath.Join(path, nodeFile)) + if err != nil { + return nil, err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(n); err != nil { + return nil, err + } + + return n, nil +} + +// NewNode will return a new node +func NewNode(path string) *Node { + return &Node{ + path: path, + } +} + +// Save will save the node file to disk and replace the existing one if present +func (n *Node) Save() error { + file := filepath.Join(n.path, nodeFile) + tmpFile := file + "tmp" + + f, err := os.Create(tmpFile) + if err != nil { + return err + } + + if err = json.NewEncoder(f).Encode(n); err != nil { + f.Close() + return err + } + + if err = f.Close(); nil != err { + return err + } + + return os.Rename(tmpFile, file) +} + +func upgradeNodeFile(path string) error { + oldFile := filepath.Join(path, oldNodeFile) + b, err := ioutil.ReadFile(oldFile) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + // We shouldn't have an empty ID file, but if we do, ignore it + if len(b) == 0 { + return nil + } + + peers := []string{} + pb, err := ioutil.ReadFile(filepath.Join(path, peersFilename)) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + err = json.Unmarshal(pb, &peers) + if err != nil { + return err + } + + if len(peers) > 1 { + return fmt.Errorf("to upgrade a cluster, please contact support at influxdata") + } + + n := &Node{ + path: path, + } + if n.ID, err = strconv.ParseUint(string(b), 10, 64); err != nil { + return err + } + if err := n.Save(); err != nil { + return err + } + if err := os.Remove(oldFile); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go new file mode 100644 index 000000000..ac7ed5ab3 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -0,0 +1,111 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. +package escape // import "github.com/influxdata/influxdb/pkg/escape" + +import ( + "bytes" + "strings" +) + +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. 
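
// A minimal sketch of the escaping below, assuming this vendored escape
// package (every byte listed in Codes gains a backslash):
//
//	fmt.Printf("%s\n", escape.Bytes([]byte(`cpu load,host=web 1`)))
//	// cpu\ load\,host\=web\ 1
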
+func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. +func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + i := 0 + inLen := len(in) + var out []byte + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go new file mode 100644 index 000000000..db98033b0 --- /dev/null +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/strings.go @@ -0,0 +1,21 @@ +package escape + +import "strings" + +var ( + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) +) + +// UnescapeString returns unescaped version of in. +func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + return unescaper.Replace(in) +} + +// String returns the escaped version of in. 
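
// A minimal round-trip sketch, assuming this vendored escape package:
//
//	s := escape.String(`host=web 1,dc="eu"`)
//	// s: host\=web\ 1\,dc\=\"eu\"
//	fmt.Println(escape.UnescapeString(s) == `host=web 1,dc="eu"`) // true
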
+func String(in string) string { + return escaper.Replace(in) +} diff --git a/vendor/github.com/stvp/go-udp-testing/udp.go b/vendor/github.com/stvp/go-udp-testing/udp.go index caedb900f..a7a5a55fa 100644 --- a/vendor/github.com/stvp/go-udp-testing/udp.go +++ b/vendor/github.com/stvp/go-udp-testing/udp.go @@ -44,26 +44,21 @@ func getMessage(t *testing.T, body fn) string { start(t) defer stop(t) - result := make(chan string) - - go func() { - message := make([]byte, 1024*32) - var bufLen int - for { - listener.SetReadDeadline(time.Now().Add(Timeout)) - n, _, _ := listener.ReadFrom(message[bufLen:]) - if n == 0 { - result <- string(message[0:bufLen]) - break - } else { - bufLen += n - } - } - }() - body() - return <-result + message := make([]byte, 1024*32) + var bufLen int + for { + listener.SetReadDeadline(time.Now().Add(Timeout)) + n, _, _ := listener.ReadFrom(message[bufLen:]) + if n == 0 { + break + } else { + bufLen += n + } + } + + return string(message[0:bufLen]) } func get(t *testing.T, match string, body fn) (got string, equals bool, contains bool) { @@ -190,3 +185,7 @@ func ShouldReceiveAllAndNotReceiveAny(t *testing.T, expected []string, unexpecte t.Errorf("but got: %#v", got) } } + +func ReceiveString(t *testing.T, body fn) string { + return getMessage(t, body) +}
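
The new ReceiveString helper simply exposes getMessage, so a test can capture the raw UDP payload and assert on it directly. A minimal sketch of how it might be used, assuming the stvp/go-udp-testing package as patched above (the address and metric name are illustrative, not taken from the patch):

	udp.SetAddr(":8089")
	got := udp.ReceiveString(t, func() {
		// code under test that writes a metric to UDP :8089
	})
	if !strings.Contains(got, "traefik.requests.total") {
		t.Errorf("unexpected payload: %q", got)
	}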