Merge v1.2.1-master

Signed-off-by: Emile Vauge <emile@vauge.com>
Committed by Emile Vauge on 2017-04-11 17:10:46 +02:00
parent a590155b0b
commit aeb17182b4
GPG key ID: D808B4C167352E59 (no known key found for this signature in database)
396 changed files with 27271 additions and 9969 deletions


@ -8,7 +8,7 @@ TRAEFIK_ENVS := \
-e VERSION \
-e CODENAME
SRCS = $(shell git ls-files '*.go' | grep -v '^external/')
SRCS = $(shell git ls-files '*.go' | grep -v '^vendor/' | grep -v '^integration/vendor/')
BIND_DIR := "dist"
TRAEFIK_MOUNT := -v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/containous/traefik/$(BIND_DIR)"

glide.lock (generated, 487 changed lines)

@ -1,51 +1,19 @@
<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD
<<<<<<< HEAD
hash: b1cbcbd938a47a246b0d4d634037b76e63486c63e5867b339f92bcbd7453b75c
updated: 2017-04-07T11:34:46.101385591+01:00
=======
hash: b4e99f1210c5bfb7784b55c9ec5b54629fadaea0625b29e540979b1acbd7dc09
updated: 2017-03-02T09:39:03.718549862+01:00
>>>>>>> 9af5ba3... Bump go-rancher version
=======
hash: 6d21d24827ef1c9321e8a41a328c42f891281af2ea68841d42411f23b0666d4b
updated: 2017-03-02T14:57:08.032744661-07:00
>>>>>>> eebbf6e... update oxy hash
=======
hash: f4360ad29820f7366aeb1bb85f3765c0b9dc4388ed5a072ead82209b5230ce25
updated: 2017-03-14T10:10:22.924762706Z
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
hash: 6d21d24827ef1c9321e8a41a328c42f891281af2ea68841d42411f23b0666d4b
updated: 2017-03-02T14:57:08.032744661-07:00
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
=======
hash: 741ec5fae23f12e6c0fa0e4c7c00c0af06fac1ddc199dd4b45c904856890b347
updated: 2017-03-15T10:48:05.202095822+01:00
>>>>>>> 7c55a4f... Update github.com/containous/oxy only.
=======
hash: b689cb0faed68086641d9e3504ee29498e5bf06b088ad4fcd1e76543446d4d9a
updated: 2017-03-27T14:29:54.009570184+02:00
>>>>>>> 0d657a0... bump lego 0e2937900
hash: 2abed980e61ff7659b181849419aadcb8d9d63594a5ac3215d09df0ea12a1700
updated: 2017-04-11T17:02:21.540487905+02:00
imports:
- name: bitbucket.org/ww/goautoneg
version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675
- name: cloud.google.com/go
version: 2e6a95edb1071d750f6d7db777bf66cd2997af6c
subpackages:
- compute/metadata
- internal
- name: github.com/abbot/go-http-auth
version: cb4372376e1e00e9f6ab9ec142e029302c9e7140
version: d45c47bedec736d172957bd394786b76626fa8ac
- name: github.com/ArthurHlt/go-eureka-client
version: ba361cd0f9f571b4e871421423d2f02f5689c3d2
version: 9d0a49cbd39aa3634ae1977e9f519a262b10adaf
subpackages:
- eureka
- name: github.com/ArthurHlt/gominlog
version: 068c01ce147ad68fca25ef3fa29ae5395ae273ab
version: 72eebf980f467d3ab3a8b4ddf660f664911ce519
- name: github.com/aws/aws-sdk-go
version: 3f8f870ec9939e32b3372abf74d24e468bcd285d
subpackages:
@ -84,63 +52,38 @@ imports:
- service/route53
- service/sts
- name: github.com/Azure/azure-sdk-for-go
<<<<<<< HEAD
<<<<<<< HEAD
version: 088007b3b08cc02b27f2eadfdcd870958460ce7e
subpackages:
- arm/dns
- name: github.com/Azure/go-autorest
version: a2fdd780c9a50455cecd249b00bdc3eb73a78e31
=======
version: 4897648e310020dae650a89c31ff633284c13a24
subpackages:
- arm/dns
- name: github.com/Azure/go-autorest
version: ec5f4903f77ed9927ac95b19ab8e44ada64c1356
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 1620af6b32398bfc91827ceae54a8cc1f55df04d
subpackages:
- arm/dns
- name: github.com/Azure/go-autorest
version: 32cc2321122a649b7ba4e323527bcb145134fd47
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
subpackages:
- autorest
- autorest/azure
- autorest/date
- autorest/to
- autorest/validation
- name: github.com/beorn7/perks
version: b965b613227fddccbfffe13eae360ed3fa822f8d
version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
subpackages:
- quantile
- name: github.com/blang/semver
<<<<<<< HEAD
<<<<<<< HEAD
version: 31b736133b98f26d5e078ec9eb591666edfd091f
=======
version: 4a1e882c79dcf4ec00d2e29fac74b9c8938d5052
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 3a37c301dda64cbe17f16f661b4c976803c0e2d2
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
- name: github.com/boltdb/bolt
version: 5cc10bbbc5c141029940133bb33c9e969512a698
version: e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
- name: github.com/BurntSushi/toml
version: 99064174e013895bbd9b025c31100bd1d9b590ca
version: b26d9c308763d68093482582cea63d69be07a0f0
- name: github.com/BurntSushi/ty
version: 6add9cd6ad42d389d6ead1dde60b4ad71e46fd74
subpackages:
- fun
- name: github.com/cenk/backoff
version: 8edc80b07f38c27352fb186d971c628a6c32552b
version: 5d150e7eec023ce7a124856b37c68e54b4050ac7
- name: github.com/codahale/hdrhistogram
version: 9208b142303c12d8899bae836fd524ac9338b4fd
- name: github.com/codegangsta/cli
version: bf4a526f48af7badd25d2cb02d587e1b01be3b50
- name: github.com/codegangsta/negroni
version: dc6b9d037e8dab60cbfc09c61d6932537829be8b
version: c0db5feaa33826cd5117930c8f4ee5c0f565eec6
- name: github.com/containous/flaeg
version: b5d2dc5878df07c2d74413348186982e7b865871
- name: github.com/containous/mux
@ -150,21 +93,15 @@ imports:
- name: github.com/coreos/etcd
version: c400d05d0aa73e21e431c16145e558d624098018
subpackages:
- Godeps/_workspace/src/github.com/ugorji/go/codec
- Godeps/_workspace/src/golang.org/x/net/context
- Godeps/_workspace/src/github.com/coreos/go-systemd/journal
- Godeps/_workspace/src/github.com/coreos/pkg/capnslog
- client
- pkg/fileutil
- pkg/pathutil
- pkg/types
- version
- name: github.com/coreos/go-oidc
<<<<<<< HEAD
<<<<<<< HEAD
version: 5644a2f50e2d2d5ba0b474bc5bc55fea1925936d
=======
version: be73733bb8cc830d0205609b95d125215f8e9c70
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 9e117111587506b9dc83b7b38263268bf48352ea
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
subpackages:
- http
- jose
@ -182,34 +119,17 @@ imports:
- httputil
- timeutil
- name: github.com/davecgh/go-spew
version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9
version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9
subpackages:
- spew
- name: github.com/daviddengcn/go-colortext
version: 3b18c8575a432453d41fdafb340099fff5bba2f7
- name: github.com/decker502/dnspod-go
version: f6b1d56f1c048bd94d7e42ac36efb4d57b069b6f
version: 68650ee11e182e30773781d391c66a0c80ccf9f2
- name: github.com/dgrijalva/jwt-go
<<<<<<< HEAD
<<<<<<< HEAD
version: 9ed569b5d1ac936e6494082958d63a6aa4fff99a
version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
- name: github.com/dnsimple/dnsimple-go
version: 5a5b427618a76f9eed5ede0f3e6306fbd9311d2e
subpackages:
- dnsimple
=======
version: d2709f9f1f31ebcda9651b03077758c1f3a0018c
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 9ed569b5d1ac936e6494082958d63a6aa4fff99a
<<<<<<< HEAD
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
=======
- name: github.com/dnsimple/dnsimple-go
version: eeb343928d9a3de357a650c8c25d8f1318330d57
subpackages:
- dnsimple
>>>>>>> 0d657a0... bump lego 0e2937900
- name: github.com/docker/distribution
version: 325b0804fef3a66309d962357aac3c2ce3f4d329
subpackages:
@ -218,67 +138,7 @@ imports:
- name: github.com/docker/docker
version: 49bf474f9ed7ce7143a59d1964ff7b7fd9b52178
subpackages:
<<<<<<< HEAD
- namesgenerator
=======
- api/types
- api/types/backend
- api/types/blkiodev
- api/types/container
- api/types/filters
- api/types/mount
- api/types/network
- api/types/registry
- api/types/strslice
- api/types/swarm
- api/types/versions
- builder
- builder/dockerignore
- cliconfig
- cliconfig/configfile
- daemon/graphdriver
- image
- image/v1
- layer
- namesgenerator
- oci
- opts
- pkg/archive
- pkg/chrootarchive
- pkg/fileutils
- pkg/gitutils
- pkg/homedir
- pkg/httputils
- pkg/idtools
- pkg/ioutils
- pkg/jsonlog
- pkg/jsonmessage
- pkg/longpath
- pkg/mount
- pkg/namesgenerator
- pkg/plugingetter
- pkg/plugins
- pkg/plugins/transport
- pkg/pools
- pkg/progress
- pkg/promise
- pkg/random
- pkg/reexec
- pkg/signal
- pkg/stdcopy
- pkg/streamformatter
- pkg/stringid
- pkg/symlink
- pkg/system
- pkg/tarsum
- pkg/term
- pkg/term/windows
- pkg/urlutil
- plugin/v2
- reference
- registry
- runconfig/opts
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
- name: github.com/docker/engine-api
version: 3d1601b9d2436a70b0dfc045a23f6503d19195df
subpackages:
@ -306,9 +166,9 @@ imports:
- name: github.com/docker/go-units
version: 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
- name: github.com/docker/leadership
version: bfc7753dd48af19513b29deec23c364bf0f274eb
version: 0a913e2d71a12fd14a028452435cb71ac8d82cb6
- name: github.com/docker/libkv
version: 35d3e2084c650109e7bcc7282655b1bc8ba924ff
version: 1d8431073ae03cdaedb198a89722f3aab6d418ef
subpackages:
- store
- store/boltdb
@ -330,18 +190,20 @@ imports:
- tokens
- zones
- name: github.com/elazarl/go-bindata-assetfs
version: 57eb5e1fc594ad4b0b1dbea7b286d299e0cb43c2
version: 30f82fa23fd844bd5bb1e5f216db87fd77b5eb43
- name: github.com/emicklei/go-restful
version: 892402ba11a2e2fd5e1295dd633481f27365f14d
subpackages:
- log
- swagger
- name: github.com/fatih/color
version: 9131ab34cf20d2f6d83fdc67168a5430d1c7dc23
- name: github.com/gambol99/go-marathon
version: 6b00a5b651b1beb2c6821863f7c60df490bd46c8
- name: github.com/ghodss/yaml
version: 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee
- name: github.com/go-ini/ini
version: 6f66b0e091edb3c7b380f7c4f0f884274d550b67
version: e7fea39b01aea8d5671f6858f0532f56e8bff3a5
- name: github.com/go-kit/kit
version: f66b0e13579bfc5a48b9e2a94b1209c107ea1f41
subpackages:
@ -349,33 +211,13 @@ imports:
- metrics/internal/lv
- metrics/prometheus
- name: github.com/go-openapi/jsonpointer
<<<<<<< HEAD
<<<<<<< HEAD
version: 46af16f9f7b149af66e5d1bd010e3574dc06de98
=======
version: 779f45308c19820f1a69e9a4cd965f496e0da10f
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 8d96a2dc61536b690bd36b2e9df0b3c0b62825b2
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
- name: github.com/go-openapi/jsonreference
version: 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
- name: github.com/go-openapi/spec
<<<<<<< HEAD
<<<<<<< HEAD
version: 6aced65f8501fe1217321abf0749d354824ba2ff
- name: github.com/go-openapi/swag
version: 1d0bd113de87027671077d3c71eb3ac5d7dbba72
=======
version: 02fb9cd3430ed0581e0ceb4804d5d4b3cc702694
- name: github.com/go-openapi/swag
version: d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 34b5ffff717ab4535aef76e3dd90818bddde571b
- name: github.com/go-openapi/swag
version: 96d7b9ebd181a1735a1c9ac87914f2b32fbf56c9
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
- name: github.com/gogo/protobuf
version: 909568be09de550ed094403c2bf8a261b5bb730a
subpackages:
@ -384,15 +226,15 @@ imports:
- name: github.com/golang/glog
version: fca8c8854093a154ff1eb580aae10276ad6b1b5f
- name: github.com/golang/protobuf
version: 5677a0e3d5e89854c9974e1256839ee23f8233ca
version: 2bba0603135d7d7f5cb73b2125beeda19c09f4ef
subpackages:
- proto
- name: github.com/google/go-github
version: c8ebe3a4d7f0791a6315b7410353d4084c58805d
version: 6896997c7c9fe603fb9d2e8e92303bb18481e60a
subpackages:
- github
- name: github.com/google/go-querystring
version: 9235644dd9e52eeae6fa48efd539fdc351a0af53
version: 53e6ce116135b80d037921a7fdd5138cf32d7a8a
subpackages:
- query
- name: github.com/google/gofuzz
@ -400,24 +242,23 @@ imports:
- name: github.com/googleapis/gax-go
version: 9af46dd5a1713e8b5cd71106287eba3cefdde50b
- name: github.com/gorilla/context
version: 1ea25387ff6f684839d82767c1733ff4d4d15d0a
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
- name: github.com/gorilla/websocket
version: 4873052237e4eeda85cf50c071ef33836fe8e139
version: a91eba7f97777409bc2c443f5534d41dd20c5720
- name: github.com/hashicorp/consul
version: fce7d75609a04eeb9d4bf41c8dc592aac18fc97d
version: 3f92cc70e8163df866873c16c6d89889b5c95fc4
subpackages:
- api
- name: github.com/hashicorp/go-cleanhttp
version: 875fb671b3ddc66f8e2f0acc33829c8cb989a38d
version: 3573b8b52aa7b37b9358d966a898feb387f62437
- name: github.com/hashicorp/go-version
version: e96d3840402619007766590ecea8dd7af1292276
version: 03c5bf6be031b6dd45afec16b1cf94fc8938bc77
- name: github.com/hashicorp/serf
version: 6c4672d66fc6312ddde18399262943e21175d831
version: 19f2c401e122352c047a84d6584dd51e2fb8fcc4
subpackages:
- coordinate
- serf
- name: github.com/JamesClonk/vultr
version: 9ec0427d51411407c0402b093a1771cb75af9679
version: 0f156dd232bc4ebf8a32ba83fec57c0e4c9db69f
subpackages:
- lib
- name: github.com/jmespath/go-jmespath
@ -429,23 +270,21 @@ imports:
- name: github.com/mailgun/timetools
version: fd192d755b00c968d312d23f521eb0cdc6f66bd0
- name: github.com/mailru/easyjson
<<<<<<< HEAD
<<<<<<< HEAD
version: d5b7844b561a7bc640052f1b935f7b800330d7e0
=======
version: db58e6f9072c545a3a24b8d44c51d81fff6dcb51
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 9d6630dc8c577b56cb9687a9cf9e8578aca7298a
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
subpackages:
- buffer
- jlexer
- jwriter
- name: github.com/mattn/go-colorable
version: 5411d3eea5978e6cdc258b30de592b60df6aba96
repo: https://github.com/mattn/go-colorable
- name: github.com/mattn/go-isatty
version: 57fdcb988a5c543893cc61bce354a6e24ab70022
repo: https://github.com/mattn/go-isatty
- name: github.com/mattn/go-shellwords
version: 525bedee691b5a8df547cb5cf9f86b7fb1883e24
version: 02e3cf038dcea8290e44424da473dd12be796a8a
- name: github.com/matttproud/golang_protobuf_extensions
version: fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
version: c12348ce28de40eed0136aa2b644d0ee0650e56c
subpackages:
- pbutil
- name: github.com/mesos/mesos-go
@ -471,63 +310,58 @@ imports:
- records/state
- util
- name: github.com/Microsoft/go-winio
version: ce2922f643c8fd76b46cadc7f404a06282678b34
version: fff283ad5116362ca252298cfc9b95828956d85d
- name: github.com/miekg/dns
version: 8060d9f51305bbe024b99679454e62f552cd0b0b
- name: github.com/mitchellh/mapstructure
version: f3009df150dadf309fdee4a54ed65c124afad715
version: 53818660ed4955e899c0bcafa97299a388bd7c8e
- name: github.com/mvdan/xurls
version: fa08908f19eca8c491d68c6bd8b4b44faea6daf8
version: db96455566f05ffe42bd6ac671f05eeb1152b45d
- name: github.com/NYTimes/gziphandler
version: 6710af535839f57c687b62c4c23d649f9545d885
version: 22d4470af89e09998fc16b35029df973932df4ae
- name: github.com/ogier/pflag
version: 45c278ab3607870051a2ea9040bb85fcb8557481
- name: github.com/opencontainers/runc
version: 1a81e9ab1f138c091fe5c86d0883f87716088527
version: 50401b5b4c2e01e4f1372b73a021742deeaf4e2d
subpackages:
- libcontainer/user
- name: github.com/ovh/go-ovh
version: a8a4c0bc40e56322142649bda7b2b4bb15145b6e
version: d2207178e10e4527e8f222fd8707982df8c3af17
subpackages:
- ovh
- name: github.com/pborman/uuid
<<<<<<< HEAD
<<<<<<< HEAD
version: ca53cad383cad2479bbba7f7a1a05797ec1386e4
=======
version: 1b00554d822231195d1babd97ff4a781231955c9
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
=======
version: 5007efa264d92316c43112bc573e754bc889b7b1
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
- name: github.com/pkg/errors
version: bfd5150e4e41705ded2129ec33379de1cb90b513
version: ff09b135c25aae272398c51a07235b90a75aa4f0
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/prometheus/client_golang
version: c5b7fccd204277076155f10851dad72b76a49317
version: 08fd2e12372a66e68e30523c7642e0cbc3e4fbde
subpackages:
- prometheus
- prometheus/promhttp
- name: github.com/prometheus/client_model
version: fa8ad6fec33561be4280a8f0514318c79d7f6cb6
version: 6f3806018612930941127f2a7c6c453ba2c527d2
subpackages:
- go
- name: github.com/prometheus/common
version: ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650
version: 49fee292b27bfff7f354ee0f64e1bc4850462edf
subpackages:
- expfmt
- internal/bitbucket.org/ww/goautoneg
- model
- name: github.com/prometheus/procfs
version: 454a56f35412459b5e684fd5ec0f9211b94f002a
version: a1dba9ce8baed984a2495b658c82687f8157b98f
subpackages:
- xfs
- name: github.com/PuerkitoBio/purell
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
- name: github.com/PuerkitoBio/urlesc
version: 5bd2802263f21d8788851d5305584c82a5c75d7e
- name: github.com/pyr/egoscale
version: ab4b0d7ff424c462da486aef27f354cdeb29a319
version: 987e683a7552f34ee586217d1cc8507d52e80ab9
subpackages:
- src/egoscale
- name: github.com/rancher/go-rancher
@ -535,23 +369,23 @@ imports:
subpackages:
- client
- name: github.com/ryanuber/go-glob
version: 572520ed46dbddaed19ea3d9541bdd0494163693
version: 256dc444b735e061061cf46c809487313d5b0065
- name: github.com/samuel/go-zookeeper
version: e64db453f3512cade908163702045e0f31137843
version: 1d7be4effb13d2d908342d349d71a284a7542693
subpackages:
- zk
- name: github.com/satori/go.uuid
version: 879c5887cd475cd7864858769793b2ceb0d44feb
- name: github.com/Sirupsen/logrus
version: a283a10442df8dc09befd873fab202bf8a253d6a
version: 10f801ebc38b33738c9d17d50860f484a0988ff5
- name: github.com/spf13/pflag
version: 5ccb023bc27df288a957c5e994cd44fd19619465
- name: github.com/streamrail/concurrent-map
version: 65a174a3a4188c0b7099acbc6cfa0c53628d3287
version: 8bf1e9bacbf65b10c81d0f4314cf2b1ebef728b5
- name: github.com/stretchr/objx
version: cbeaeb16a013161a98496fad62933b1d21786672
- name: github.com/stretchr/testify
version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0
version: 4d4bfba8f1d1027c4fdbe371823030df51419987
subpackages:
- assert
- mock
@ -568,7 +402,7 @@ imports:
subpackages:
- codec
- name: github.com/unrolled/render
version: 198ad4d8b8a4612176b804ca10555b222a086b40
version: 50716a0a853771bb36bfce61a45cdefdb98c2e6e
- name: github.com/vdemeester/docker-events
version: be74d4929ec1ad118df54349fda4b0cba60f849b
- name: github.com/vulcand/oxy
@ -595,11 +429,7 @@ imports:
- plugin/rewrite
- router
- name: github.com/xenolf/lego
<<<<<<< HEAD
version: 5dfe609afb1ebe9da97c9846d97a55415e5a5ccd
=======
version: 0e2937900b224325f4476745a9b53aef246b7410
>>>>>>> 0d657a0... bump lego 0e2937900
subpackages:
- acme
- providers/dns
@ -655,8 +485,6 @@ imports:
- unix
- windows
- name: golang.org/x/text
<<<<<<< HEAD
<<<<<<< HEAD
version: 2910a502d2bf9e43193af9d68ca516529614eed3
subpackages:
- cases
@ -665,16 +493,6 @@ imports:
- runes
- secure/bidirule
- secure/precis
=======
version: f28f36722d5ef2f9655ad3de1f248e3e52ad5ebd
=======
version: a49bea13b776691cb1b49873e5d8df96ec74831a
>>>>>>> 7b1c0a9... Reset glide files to versions from upstream/v1.2.
repo: https://github.com/golang/text.git
vcs: git
subpackages:
- .
>>>>>>> 8392846... Update vulcand and pin deps in glide.yaml
- transform
- unicode/bidi
- unicode/norm
@ -713,17 +531,18 @@ imports:
- tap
- transport
- name: gopkg.in/fsnotify.v1
version: a8a77c9133d2d6fd8334f3260d06f60e8d80a5fb
version: 629574ca2a5df945712d3079857300b5e4da0236
- name: gopkg.in/inf.v0
version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
- name: gopkg.in/ini.v1
version: 6f66b0e091edb3c7b380f7c4f0f884274d550b67
version: e7fea39b01aea8d5671f6858f0532f56e8bff3a5
- name: gopkg.in/mgo.v2
version: 29cc868a5ca65f401ff318143f9408d02f4799cc
version: 3f83fa5005286a7fe593b055f0d7771a7dce4655
subpackages:
- bson
- internal/json
- name: gopkg.in/ns1/ns1-go.v2
version: d8d10b7f448291ddbdce48d4594fb1b667014c8b
version: 2abc76c60bf88ba33b15d1d87a13f624d8dff956
subpackages:
- rest
- rest/model/account
@ -732,14 +551,13 @@ imports:
- rest/model/filter
- rest/model/monitor
- name: gopkg.in/square/go-jose.v1
version: e3f973b66b91445ec816dd7411ad1b6495a5a2fc
version: aa2e30fdd1fe9dd3394119af66451ae790d50e0d
subpackages:
- cipher
- json
- name: gopkg.in/yaml.v2
version: 53feefa2559fb8dfa8d81baad31be332c97d6c77
- name: k8s.io/client-go
<<<<<<< HEAD
version: e121606b0d09b2e1c467183ee46217fa85a6b672
subpackages:
- discovery
@ -850,170 +668,3 @@ imports:
- tools/metrics
- transport
testImports: []
=======
version: 1195e3a8ee1a529d53eed7c624527a68555ddf1f
subpackages:
- 1.5/discovery
- 1.5/kubernetes
- 1.5/kubernetes/typed/apps/v1alpha1
- 1.5/kubernetes/typed/authentication/v1beta1
- 1.5/kubernetes/typed/authorization/v1beta1
- 1.5/kubernetes/typed/autoscaling/v1
- 1.5/kubernetes/typed/batch/v1
- 1.5/kubernetes/typed/certificates/v1alpha1
- 1.5/kubernetes/typed/core/v1
- 1.5/kubernetes/typed/extensions/v1beta1
- 1.5/kubernetes/typed/policy/v1alpha1
- 1.5/kubernetes/typed/rbac/v1alpha1
- 1.5/kubernetes/typed/storage/v1beta1
- 1.5/pkg/api
- 1.5/pkg/api/errors
- 1.5/pkg/api/install
- 1.5/pkg/api/meta
- 1.5/pkg/api/meta/metatypes
- 1.5/pkg/api/resource
- 1.5/pkg/api/unversioned
- 1.5/pkg/api/v1
- 1.5/pkg/api/validation/path
- 1.5/pkg/apimachinery
- 1.5/pkg/apimachinery/announced
- 1.5/pkg/apimachinery/registered
- 1.5/pkg/apis/apps
- 1.5/pkg/apis/apps/install
- 1.5/pkg/apis/apps/v1alpha1
- 1.5/pkg/apis/authentication
- 1.5/pkg/apis/authentication/install
- 1.5/pkg/apis/authentication/v1beta1
- 1.5/pkg/apis/authorization
- 1.5/pkg/apis/authorization/install
- 1.5/pkg/apis/authorization/v1beta1
- 1.5/pkg/apis/autoscaling
- 1.5/pkg/apis/autoscaling/install
- 1.5/pkg/apis/autoscaling/v1
- 1.5/pkg/apis/batch
- 1.5/pkg/apis/batch/install
- 1.5/pkg/apis/batch/v1
- 1.5/pkg/apis/batch/v2alpha1
- 1.5/pkg/apis/certificates
- 1.5/pkg/apis/certificates/install
- 1.5/pkg/apis/certificates/v1alpha1
- 1.5/pkg/apis/extensions
- 1.5/pkg/apis/extensions/install
- 1.5/pkg/apis/extensions/v1beta1
- 1.5/pkg/apis/policy
- 1.5/pkg/apis/policy/install
- 1.5/pkg/apis/policy/v1alpha1
- 1.5/pkg/apis/rbac
- 1.5/pkg/apis/rbac/install
- 1.5/pkg/apis/rbac/v1alpha1
- 1.5/pkg/apis/storage
- 1.5/pkg/apis/storage/install
- 1.5/pkg/apis/storage/v1beta1
- 1.5/pkg/auth/user
- 1.5/pkg/conversion
- 1.5/pkg/conversion/queryparams
- 1.5/pkg/fields
- 1.5/pkg/genericapiserver/openapi/common
- 1.5/pkg/labels
- 1.5/pkg/runtime
- 1.5/pkg/runtime/serializer
- 1.5/pkg/runtime/serializer/json
- 1.5/pkg/runtime/serializer/protobuf
- 1.5/pkg/runtime/serializer/recognizer
- 1.5/pkg/runtime/serializer/streaming
- 1.5/pkg/runtime/serializer/versioning
- 1.5/pkg/selection
- 1.5/pkg/third_party/forked/golang/reflect
- 1.5/pkg/types
- 1.5/pkg/util
- 1.5/pkg/util/cert
- 1.5/pkg/util/clock
- 1.5/pkg/util/errors
- 1.5/pkg/util/flowcontrol
- 1.5/pkg/util/framer
- 1.5/pkg/util/integer
- 1.5/pkg/util/intstr
- 1.5/pkg/util/json
- 1.5/pkg/util/labels
- 1.5/pkg/util/net
- 1.5/pkg/util/parsers
- 1.5/pkg/util/rand
- 1.5/pkg/util/runtime
- 1.5/pkg/util/sets
- 1.5/pkg/util/uuid
- 1.5/pkg/util/validation
- 1.5/pkg/util/validation/field
- 1.5/pkg/util/wait
- 1.5/pkg/util/yaml
- 1.5/pkg/version
- 1.5/pkg/watch
- 1.5/pkg/watch/versioned
- 1.5/plugin/pkg/client/auth
- 1.5/plugin/pkg/client/auth/gcp
- 1.5/plugin/pkg/client/auth/oidc
- 1.5/rest
- 1.5/tools/cache
- 1.5/tools/clientcmd/api
- 1.5/tools/metrics
- 1.5/transport
testImports:
- name: github.com/Azure/go-ansiterm
version: fa152c58bc15761d0200cb75fe958b89a9d4888e
subpackages:
- winterm
- name: github.com/cloudfoundry-incubator/candiedyaml
version: 99c3df83b51532e3615f851d8c2dbb638f5313bf
- name: github.com/docker/libcompose
version: d1876c1d68527a49c0aac22a0b161acc7296b740
subpackages:
- config
- docker
- docker/builder
- docker/client
- docker/network
- labels
- logger
- lookup
- project
- project/events
- project/options
- utils
- version
- yaml
- name: github.com/flynn/go-shlex
version: 3f9db97f856818214da2e1057f8ad84803971cff
- name: github.com/go-check/check
version: 11d3bc7aa68e238947792f30573146a3231fc0f1
- name: github.com/gorilla/mux
version: e444e69cbd2e2e3e0749a2f3c717cec491552bbf
- name: github.com/libkermit/compose
version: cadc5a3b83a15790174bd7fbc75ea2529785e772
subpackages:
- check
- name: github.com/libkermit/docker
version: 55e3595409924fcfbb850811e5a7cdbe8960a0b7
- name: github.com/libkermit/docker-check
version: cbe0ef03b3d23070eac4d00ba8828f2cc7f7e5a3
- name: github.com/opencontainers/runtime-spec
version: 06479209bdc0d4135911688c18157bd39bd99c22
subpackages:
- specs-go
- name: github.com/vbatts/tar-split
version: 6810cedb21b2c3d0b9bb8f9af12ff2dc7a2f14df
subpackages:
- archive/tar
- tar/asm
- tar/storage
- name: github.com/vdemeester/shakers
version: 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
- name: github.com/xeipuuv/gojsonpointer
version: e0fe6f68307607d540ed8eac07a342c33fa1b54a
- name: github.com/xeipuuv/gojsonreference
version: e02fc20de94c78484cd5ffb007f8af96be030a45
- name: github.com/xeipuuv/gojsonschema
version: 00f9fafb54d2244d291b86ab63d12c38bd5c3886
- name: golang.org/x/time
version: a4bde12657593d5e90d0533a3e4fd95e635124cb
subpackages:
- rate
>>>>>>> 9af5ba3... Bump go-rancher version


@ -25,11 +25,11 @@ func GetHealthCheck() *HealthCheck {
// BackendHealthCheck HealthCheck configuration for a backend
type BackendHealthCheck struct {
Path string
Interval time.Duration
DisabledURLs []*url.URL
Path string
Interval time.Duration
DisabledURLs []*url.URL
requestTimeout time.Duration
lb loadBalancer
lb loadBalancer
}
var launch = false
@ -53,9 +53,9 @@ func newHealthCheck() *HealthCheck {
}
// NewBackendHealthCheck Instantiate a new BackendHealthCheck
func NewBackendHealthCheck(URL string, interval time.Duration, lb loadBalancer) *BackendHealthCheck {
func NewBackendHealthCheck(Path string, interval time.Duration, lb loadBalancer) *BackendHealthCheck {
return &BackendHealthCheck{
URL: URL,
Path: Path,
Interval: interval,
requestTimeout: 5 * time.Second,
lb: lb,
@ -85,12 +85,12 @@ func (hc *HealthCheck) execute(ctx context.Context, backendID string, backend *B
checkBackend(backend)
ticker := time.NewTicker(backend.Interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
log.Debugf("Stopping all current Healthcheck goroutines")
return
case <-ticker.C:
for {
select {
case <-ctx.Done():
log.Debugf("Stopping all current Healthcheck goroutines")
return
case <-ticker.C:
log.Debugf("Refreshing healthcheck for currentBackend %s ", backendID)
checkBackend(backend)
}
@ -98,26 +98,26 @@ func (hc *HealthCheck) execute(ctx context.Context, backendID string, backend *B
}
func checkBackend(currentBackend *BackendHealthCheck) {
enabledURLs := currentBackend.lb.Servers()
var newDisabledURLs []*url.URL
for _, url := range currentBackend.DisabledURLs {
enabledURLs := currentBackend.lb.Servers()
var newDisabledURLs []*url.URL
for _, url := range currentBackend.DisabledURLs {
if checkHealth(url, currentBackend) {
log.Debugf("HealthCheck is up [%s]: Upsert in server list", url.String())
currentBackend.lb.UpsertServer(url, roundrobin.Weight(1))
} else {
log.Debugf("HealthCheck is up [%s]: Upsert in server list", url.String())
currentBackend.lb.UpsertServer(url, roundrobin.Weight(1))
} else {
log.Warnf("HealthCheck is still failing [%s]", url.String())
newDisabledURLs = append(newDisabledURLs, url)
}
}
currentBackend.DisabledURLs = newDisabledURLs
newDisabledURLs = append(newDisabledURLs, url)
}
}
currentBackend.DisabledURLs = newDisabledURLs
for _, url := range enabledURLs {
for _, url := range enabledURLs {
if !checkHealth(url, currentBackend) {
log.Warnf("HealthCheck has failed [%s]: Remove from server list", url.String())
currentBackend.lb.RemoveServer(url)
currentBackend.DisabledURLs = append(currentBackend.DisabledURLs, url)
}
}
currentBackend.lb.RemoveServer(url)
currentBackend.DisabledURLs = append(currentBackend.DisabledURLs, url)
}
}
}
func checkHealth(serverURL *url.URL, backend *BackendHealthCheck) bool {
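As a standalone illustration of the checkBackend logic shown above: servers in DisabledURLs that pass the probe again are upserted back into the load balancer, while enabled servers that fail it are removed and recorded as disabled. The loadBalancer interface and isHealthy probe below are simplified stand-ins for illustration, not the actual Traefik healthcheck types.

package sketch

import "net/url"

// loadBalancer is a simplified stand-in for the round-robin balancer used above.
type loadBalancer interface {
	Servers() []*url.URL
	UpsertServer(u *url.URL) error
	RemoveServer(u *url.URL) error
}

type backendCheck struct {
	disabledURLs []*url.URL
	lb           loadBalancer
}

// isHealthy stands in for the HTTP probe against the configured Path (assumed stub).
func isHealthy(u *url.URL) bool {
	return u != nil && u.Host != ""
}

// check mirrors checkBackend: re-enable recovered servers, disable failing ones.
func (b *backendCheck) check() {
	enabled := b.lb.Servers()
	var stillDisabled []*url.URL
	for _, u := range b.disabledURLs {
		if isHealthy(u) {
			b.lb.UpsertServer(u) // recovered: put back into rotation
		} else {
			stillDisabled = append(stillDisabled, u)
		}
	}
	b.disabledURLs = stillDisabled
	for _, u := range enabled {
		if !isHealthy(u) {
			b.lb.RemoveServer(u) // failing: take out of rotation
			b.disabledURLs = append(b.disabledURLs, u)
		}
	}
}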


@ -26,7 +26,7 @@ func (s *SimpleSuite) TestInvalidConfigShouldFail(c *check.C) {
defer cmd.Process.Kill()
output := b.Bytes()
c.Assert(string(output), checker.Contains, "Near line 0 (last key parsed ''): Bare keys cannot contain '{'")
c.Assert(string(output), checker.Contains, "Near line 0 (last key parsed ''): bare keys cannot contain '{'")
}
func (s *SimpleSuite) TestSimpleDefaultConfig(c *check.C) {
@ -70,7 +70,7 @@ func (s *SimpleSuite) TestDefaultEntryPoints(c *check.C) {
defer cmd.Process.Kill()
output := b.Bytes()
c.Assert(string(output), checker.Contains, "\\\"DefaultEntryPoints\\\":[\\\"http\\\"]")
c.Assert(string(output), checker.Contains, "\"DefaultEntryPoints\":[\"http\"]")
}
func (s *SimpleSuite) TestPrintHelp(c *check.C) {


@ -19,7 +19,7 @@ type mockDynamoDBCLient struct {
var backend = &types.Backend{
HealthCheck: &types.HealthCheck{
URL: "/build",
Path: "/build",
},
Servers: map[string]types.Server{
"server1": {


@ -6,7 +6,6 @@ import (
"io/ioutil"
"time"
"github.com/containous/traefik/log"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api"
"k8s.io/client-go/pkg/api/v1"


@ -236,39 +236,39 @@ func (provider *Kubernetes) loadIngresses(k8sClient k8s.Client) (*types.Configur
Weight: 1,
}
} else {
endpoints, exists, err := k8sClient.GetEndpoints(service.ObjectMeta.Namespace, service.ObjectMeta.Name)
if err != nil {
log.Errorf("Error retrieving endpoints %s/%s: %v", service.ObjectMeta.Namespace, service.ObjectMeta.Name, err)
return nil, err
}
if !exists {
log.Errorf("Endpoints not found for %s/%s", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
continue
}
if len(endpoints.Subsets) == 0 {
log.Warnf("Service endpoints not found for %s/%s, falling back to Service ClusterIP", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
URL: protocol + "://" + service.Spec.ClusterIP + ":" + strconv.Itoa(int(port.Port)),
Weight: 1,
endpoints, exists, err := k8sClient.GetEndpoints(service.ObjectMeta.Namespace, service.ObjectMeta.Name)
if err != nil {
log.Errorf("Error retrieving endpoints %s/%s: %v", service.ObjectMeta.Namespace, service.ObjectMeta.Name, err)
return nil, err
}
} else {
for _, subset := range endpoints.Subsets {
for _, address := range subset.Addresses {
url := protocol + "://" + address.IP + ":" + strconv.Itoa(endpointPortNumber(port, subset.Ports))
name := url
if address.TargetRef != nil && address.TargetRef.Name != "" {
name = address.TargetRef.Name
}
templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
URL: url,
Weight: 1,
if !exists {
log.Errorf("Endpoints not found for %s/%s", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
continue
}
if len(endpoints.Subsets) == 0 {
log.Warnf("Service endpoints not found for %s/%s, falling back to Service ClusterIP", service.ObjectMeta.Namespace, service.ObjectMeta.Name)
templateObjects.Backends[r.Host+pa.Path].Servers[string(service.UID)] = types.Server{
URL: protocol + "://" + service.Spec.ClusterIP + ":" + strconv.Itoa(int(port.Port)),
Weight: 1,
}
} else {
for _, subset := range endpoints.Subsets {
for _, address := range subset.Addresses {
url := protocol + "://" + address.IP + ":" + strconv.Itoa(endpointPortNumber(port, subset.Ports))
name := url
if address.TargetRef != nil && address.TargetRef.Name != "" {
name = address.TargetRef.Name
}
templateObjects.Backends[r.Host+pa.Path].Servers[name] = types.Server{
URL: url,
Weight: 1,
}
}
}
}
}
}
break
}
}
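A condensed, standalone sketch of the fallback the block above implements: when a service exposes ready endpoints, one backend server is created per endpoint address; when it does not, the provider falls back to a single server built from the Service ClusterIP and port. The endpointAddress type is a simplified stand-in, not the client-go type.

package sketch

import "strconv"

// endpointAddress is a simplified stand-in for a Kubernetes endpoint address.
type endpointAddress struct {
	IP   string
	Port int
}

// backendServerURLs builds one URL per endpoint address, or falls back to the
// Service ClusterIP when no endpoints are available, as in loadIngresses above.
func backendServerURLs(protocol, clusterIP string, servicePort int, addrs []endpointAddress) []string {
	if len(addrs) == 0 {
		return []string{protocol + "://" + clusterIP + ":" + strconv.Itoa(servicePort)}
	}
	urls := make([]string, 0, len(addrs))
	for _, a := range addrs {
		urls = append(urls, protocol+"://"+a.IP+":"+strconv.Itoa(a.Port))
	}
	return urls
}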


@ -341,8 +341,8 @@ func TestRuleType(t *testing.T) {
desc: "implicit default",
ingressRuleType: "",
frontendRuleType: ruleTypePathPrefix,
},
{
},
{
desc: "unknown ingress / explicit default",
ingressRuleType: "unknown",
frontendRuleType: ruleTypePathPrefix,
@ -351,7 +351,7 @@ func TestRuleType(t *testing.T) {
desc: "explicit ingress",
ingressRuleType: ruleTypePath,
frontendRuleType: ruleTypePath,
},
},
}
for _, test := range tests {
@ -359,27 +359,27 @@ func TestRuleType(t *testing.T) {
t.Run(test.desc, func(t *testing.T) {
t.Parallel()
ingress := &v1beta1.Ingress{
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: "host",
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Path: "/path",
Backend: v1beta1.IngressBackend{
Backend: v1beta1.IngressBackend{
ServiceName: "service",
ServicePort: intstr.FromInt(80),
},
},
},
},
},
},
},
},
},
},
}
}
if test.ingressRuleType != "" {
ingress.ObjectMeta.Annotations = map[string]string{
@ -388,54 +388,54 @@ func TestRuleType(t *testing.T) {
}
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "service",
UID: "1",
},
Spec: v1.ServiceSpec{
ClusterIP: "10.0.0.1",
Ports: []v1.ServicePort{
{
Name: "http",
Port: 801,
UID: "1",
},
Spec: v1.ServiceSpec{
ClusterIP: "10.0.0.1",
Ports: []v1.ServicePort{
{
Name: "http",
Port: 801,
},
},
},
},
}
}
watchChan := make(chan interface{})
client := clientMock{
watchChan := make(chan interface{})
client := clientMock{
ingresses: []*v1beta1.Ingress{ingress},
services: []*v1.Service{service},
watchChan: watchChan,
}
provider := Kubernetes{DisablePassHostHeaders: true}
actualConfig, err := provider.loadIngresses(client)
if err != nil {
watchChan: watchChan,
}
provider := Kubernetes{DisablePassHostHeaders: true}
actualConfig, err := provider.loadIngresses(client)
if err != nil {
t.Fatalf("error loading ingresses: %+v", err)
}
}
actual := actualConfig.Frontends
expected := map[string]*types.Frontend{
expected := map[string]*types.Frontend{
"host/path": {
Backend: "host/path",
Priority: len("/path"),
Routes: map[string]types.Route{
Routes: map[string]types.Route{
"/path": {
Rule: fmt.Sprintf("%s:/path", test.frontendRuleType),
},
},
"host": {
Rule: "Host:host",
},
},
},
},
},
}
}
if !reflect.DeepEqual(expected, actual) {
expectedJSON, _ := json.Marshal(expected)
actualJSON, _ := json.Marshal(actual)
t.Fatalf("expected %+v, got %+v", string(expectedJSON), string(actualJSON))
}
actualJSON, _ := json.Marshal(actual)
t.Fatalf("expected %+v, got %+v", string(expectedJSON), string(actualJSON))
}
})
}
}


@ -80,7 +80,7 @@ func TestConfigurationErrors(t *testing.T) {
},
nil,
},
expectedError: "Near line 1 (last key parsed 'Hello'): Expected key separator '=', but got '<' instead",
expectedError: "Near line 1 (last key parsed 'Hello'): expected key separator '=', but got '<' instead",
funcMap: template.FuncMap{
"Foo": func() string {
return "bar"

vendor/github.com/ArthurHlt/go-eureka-client/LICENSE (generated, vendored, new file: 21 lines added)

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 Arthur Halet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,11 +1,12 @@
package eureka
import (
"encoding/xml"
"strings"
)
func (c *Client) GetApplications() (*Applications, error) {
response, err := c.Get("apps");
response, err := c.Get("apps")
if err != nil {
return nil, err
}
@ -17,7 +18,7 @@ func (c *Client) GetApplications() (*Applications, error) {
func (c *Client) GetApplication(appId string) (*Application, error) {
values := []string{"apps", appId}
path := strings.Join(values, "/")
response, err := c.Get(path);
response, err := c.Get(path)
if err != nil {
return nil, err
}
@ -25,14 +26,39 @@ func (c *Client) GetApplication(appId string) (*Application, error) {
err = xml.Unmarshal(response.Body, application)
return application, err
}
func (c *Client) GetInstance(appId, instanceId string) (*InstanceInfo, error) {
values := []string{"apps", appId, instanceId}
path := strings.Join(values, "/")
response, err := c.Get(path);
response, err := c.Get(path)
if err != nil {
return nil, err
}
var instance *InstanceInfo = new(InstanceInfo)
err = xml.Unmarshal(response.Body, instance)
return instance, err
}
}
func (c *Client) GetVIP(vipId string) (*Applications, error) {
values := []string{"vips", vipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}
func (c *Client) GetSVIP(svipId string) (*Applications, error) {
values := []string{"svips", svipId}
path := strings.Join(values, "/")
response, err := c.Get(path)
if err != nil {
return nil, err
}
var applications *Applications = new(Applications)
err = xml.Unmarshal(response.Body, applications)
return applications, err
}


@ -67,7 +67,7 @@ type InstanceInfo struct {
type DataCenterInfo struct {
Name string `xml:"name" json:"name"`
Class string `xml:"class,attr" json:"@class"`
Metadata DataCenterMetadata `xml:"metadata,omitempty" json:"metadata,omitempty"`
Metadata *DataCenterMetadata `xml:"metadata,omitempty" json:"metadata,omitempty"`
}
type DataCenterMetadata struct {
@ -106,6 +106,8 @@ func NewRawRequest(method, relativePath string, body []byte, cancel <-chan bool)
func NewInstanceInfo(hostName, app, ip string, port int, ttl uint, isSsl bool) *InstanceInfo {
dataCenterInfo := &DataCenterInfo{
Name: "MyOwn",
Class: "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo",
Metadata: nil,
}
leaseInfo := &LeaseInfo{
EvictionDurationInSecs: ttl,
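The DataCenterInfo change above turns Metadata into a pointer and sets it to nil in NewInstanceInfo. That matters for the ,omitempty tags: Go's encoders never treat a struct value as empty, so only a nil pointer is actually left out of the output. A minimal standalone illustration with encoding/json (the types here are hypothetical, not the eureka client's):

package main

import (
	"encoding/json"
	"fmt"
)

type meta struct {
	Name string `json:"name,omitempty"`
}

type withValue struct {
	Metadata meta `json:"metadata,omitempty"`
}

type withPointer struct {
	Metadata *meta `json:"metadata,omitempty"`
}

func main() {
	v, _ := json.Marshal(withValue{})
	p, _ := json.Marshal(withPointer{})
	fmt.Println(string(v)) // {"metadata":{}} : an empty struct value is still emitted
	fmt.Println(string(p)) // {}              : a nil pointer is omitted
}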


@ -1,26 +1,26 @@
package gominlog
import (
"fmt"
"github.com/daviddengcn/go-colortext"
"io"
"log"
"os"
"regexp"
"fmt"
"runtime"
"github.com/fatih/color"
"regexp"
"strings"
"io"
)
type Level int
const (
Loff = Level(^uint(0) >> 1)
Lsevere = Level(1000)
Lerror = Level(900)
Loff = Level(^uint(0) >> 1)
Lsevere = Level(1000)
Lerror = Level(900)
Lwarning = Level(800)
Linfo = Level(700)
Ldebug = Level(600)
Lall = Level(-Loff - 1)
Linfo = Level(700)
Ldebug = Level(600)
Lall = Level(-Loff - 1)
)
type MinLog struct {
@ -54,6 +54,14 @@ func NewMinLog(appName string, level Level, withColor bool, flag int) *MinLog {
minLog.level = level
return minLog
}
func NewMinLogWithWriter(appName string, level Level, withColor bool, flag int, logWriter io.Writer) *MinLog {
minLog := &MinLog{}
minLog.log = log.New(logWriter, "", flag)
minLog.isColorized = withColor
minLog.packageName = appName
minLog.level = level
return minLog
}
func NewMinLogWithLogger(packageName string, level Level, withColor bool, logger *log.Logger) *MinLog {
minLog := &MinLog{}
minLog.log = logger
@ -89,10 +97,11 @@ func (this *MinLog) IsColorized() bool {
return this.isColorized
}
func (this *MinLog) GetLogger() *log.Logger {
return this.log
}
func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Color, args ...interface{}) {
func (this *MinLog) logMessage(typeLog string, colorFg color.Attribute, colorBg color.Attribute, args ...interface{}) {
var text string
msg := ""
flags := this.log.Flags()
@ -100,7 +109,7 @@ func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Colo
msg += this.trace()
this.log.SetFlags(flags - log.Lshortfile)
}
text, ok := args[0].(string)
text, ok := args[0].(string);
if !ok {
panic("Firt argument should be a string")
}
@ -113,51 +122,47 @@ func (this *MinLog) logMessage(typeLog string, colorFg ct.Color, colorBg ct.Colo
this.writeMsgInLogger(msg, colorFg, colorBg)
this.log.SetFlags(flags)
}
func (this *MinLog) writeMsgInLogger(msg string, colorFg ct.Color, colorBg ct.Color) {
if this.isColorized && colorFg > 0 {
ct.Foreground(colorFg, false)
}
if this.isColorized && colorBg > 0 {
ct.ChangeColor(colorFg, false, colorBg, false)
func (this *MinLog) writeMsgInLogger(msg string, colorFg color.Attribute, colorBg color.Attribute) {
if this.isColorized && int(colorBg) == 0 {
msg = color.New(colorFg).Sprint(msg)
} else if this.isColorized {
msg = color.New(colorFg, colorBg).Sprint(msg)
}
this.log.Print(msg)
if this.isColorized {
ct.ResetColor()
}
}
func (this *MinLog) Error(args ...interface{}) {
if this.level > Lerror {
return
}
this.logMessage("ERROR", ct.Red, 0, args...)
this.logMessage("ERROR", color.FgRed, 0, args...)
}
func (this *MinLog) Severe(args ...interface{}) {
if this.level > Lsevere {
return
}
this.logMessage("SEVERE", ct.Red, ct.Yellow, args...)
this.logMessage("SEVERE", color.FgRed, color.BgYellow, args...)
}
func (this *MinLog) Debug(args ...interface{}) {
if this.level > Ldebug {
return
}
this.logMessage("DEBUG", ct.Blue, 0, args...)
this.logMessage("DEBUG", color.FgBlue, 0, args...)
}
func (this *MinLog) Info(args ...interface{}) {
if this.level > Linfo {
return
}
this.logMessage("INFO", ct.Cyan, 0, args...)
this.logMessage("INFO", color.FgCyan, 0, args...)
}
func (this *MinLog) Warning(args ...interface{}) {
if this.level > Lwarning {
return
}
this.logMessage("WARNING", ct.Yellow, 0, args...)
this.logMessage("WARNING", color.FgYellow, 0, args...)
}
func (this *MinLog) trace() string {
var shortFile string
@ -167,7 +172,7 @@ func (this *MinLog) trace() string {
file, line := f.FileLine(pc[2])
if this.packageName == "" {
execFileSplit := strings.Split(os.Args[0], "/")
this.packageName = execFileSplit[len(execFileSplit)-1]
this.packageName = execFileSplit[len(execFileSplit) - 1]
}
regex, err := regexp.Compile(regexp.QuoteMeta(this.packageName) + "/(.*)")
if err != nil {
@ -176,10 +181,10 @@ func (this *MinLog) trace() string {
subMatch := regex.FindStringSubmatch(file)
if len(subMatch) < 2 {
fileSplit := strings.Split(file, "/")
shortFile = fileSplit[len(fileSplit)-1]
shortFile = fileSplit[len(fileSplit) - 1]
} else {
shortFile = subMatch[1]
}
return fmt.Sprintf("/%s/%s:%d ", this.packageName, shortFile, line)
}
}
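This vendored gominlog update swaps github.com/daviddengcn/go-colortext for github.com/fatih/color, coloring the message string itself instead of toggling terminal color state around the log call. A standalone sketch of the two paths used in writeMsgInLogger above (foreground only, and foreground plus background); the message strings are illustrative:

package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// Foreground only, as when colorBg is zero.
	fmt.Println(color.New(color.FgRed).Sprint("ERROR something failed"))
	// Foreground plus background, as in the Severe path.
	fmt.Println(color.New(color.FgRed, color.BgYellow).Sprint("SEVERE something failed"))
}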


@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/mojombo/toml
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the


@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra new line between top-level tables.
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}


@ -30,24 +30,28 @@ const (
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
arrayValTerm = ','
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
@ -56,11 +60,18 @@ type lexer struct {
input string
start int
pos int
width int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
@ -88,7 +99,7 @@ func (lx *lexer) nextItem() item {
func lex(input string) *lexer {
lx := &lexer{
input: input + "\n",
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
@ -103,7 +114,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop.")
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@ -125,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) {
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.width = 0
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.pos += lx.width
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
@ -143,9 +163,20 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only once per call of next.
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
lx.pos -= lx.width
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
@ -182,7 +213,7 @@ func (lx *lexer) skip(pred func(rune) bool) {
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (new lines, tabs, etc.).
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
@ -198,7 +229,6 @@ func lexTop(lx *lexer) stateFn {
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
@ -207,7 +237,7 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("Unexpected EOF.")
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
@ -222,12 +252,12 @@ func lexTop(lx *lexer) stateFn {
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully.
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a new line for us.
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
@ -236,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn {
lx.ignore()
return lexTop
case r == eof:
lx.ignore()
return lexTop
lx.emit(itemEOF)
return nil
}
return lx.errorf("Expected a top-level item to end with a new line, "+
"comment or EOF, but got %q instead.", r)
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@ -267,8 +297,8 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("Expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r)
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@ -278,11 +308,11 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("Unexpected end of table name. (Table names cannot " +
"be empty.)")
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("Unexpected table separator. (Table names cannot " +
"be empty.)")
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
@ -317,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
"instead.", r)
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
@ -328,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("Unexpected key separator %q.", keySep)
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
@ -359,7 +389,7 @@ func lexBareKey(lx *lexer) stateFn {
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
return lx.errorf("bare keys cannot contain %q", r)
}
}
@ -372,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("Expected key separator %q, but got %q instead.",
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
@ -381,9 +411,8 @@ func lexKeyEnd(lx *lexer) stateFn {
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new
// lines.
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
@ -397,6 +426,10 @@ func lexValue(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
@ -420,7 +453,7 @@ func lexValue(lx *lexer) stateFn {
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
@ -430,11 +463,11 @@ func lexValue(lx *lexer) stateFn {
lx.backup()
return lexBool
}
return lx.errorf("Expected value but found %q instead.", r)
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored.
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
@ -443,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == arrayValTerm:
return lx.errorf("Unexpected array value terminator %q.",
arrayValTerm)
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
@ -455,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn {
return lexValue
}
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
@ -465,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == arrayValTerm:
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf("Expected an array value terminator %q or an array "+
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
@ -506,11 +598,12 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == '\\':
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case r == stringEnd:
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
@ -534,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("Strings cannot contain new lines.")
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
@ -547,12 +642,13 @@ func lexRawString(lx *lexer) stateFn {
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'" has already been consumed and
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == rawStringEnd:
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
@ -605,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("Invalid escape character %q. Only the following "+
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
"\\uXXXX and \\UXXXXXXXX.", r)
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
@ -616,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
"but got '%s' instead.", lx.current())
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
@ -628,8 +723,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
"but got '%s' instead.", lx.current())
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
@ -647,9 +742,9 @@ func lexNumberOrDateStart(lx *lexer) stateFn {
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("Expected a digit but got %q.", r)
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
@ -697,9 +792,9 @@ func lexNumberStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("Expected a digit but got %q.", r)
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
@ -745,7 +840,7 @@ func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if r == eof || isWhitespace(r) || isNL(r) {
if !unicode.IsLetter(r) {
lx.backup()
break
}
@ -757,7 +852,7 @@ func lexBool(lx *lexer) stateFn {
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("Expected value but found %q instead.", s)
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
@ -769,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn {
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()

View file

@ -269,6 +269,41 @@ func (p *parser) value(it item) (interface{}, tomlType) {
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")

38
vendor/github.com/JamesClonk/vultr/lib/applications.go generated vendored Normal file
View file

@ -0,0 +1,38 @@
package lib
import (
"sort"
"strings"
)
// Application on Vultr
type Application struct {
ID string `json:"APPID"`
Name string `json:"name"`
ShortName string `json:"short_name"`
DeployName string `json:"deploy_name"`
Surcharge float64 `json:"surcharge"`
}
type applications []Application
func (s applications) Len() int { return len(s) }
func (s applications) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s applications) Less(i, j int) bool {
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
}
// GetApplications returns a list of all available applications on Vultr
func (c *Client) GetApplications() ([]Application, error) {
var appMap map[string]Application
if err := c.get(`app/list`, &appMap); err != nil {
return nil, err
}
var appList []Application
for _, app := range appMap {
appList = append(appList, app)
}
sort.Sort(applications(appList))
return appList, nil
}
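A minimal usage sketch for GetApplications; the NewClient constructor and its options argument are assumptions about this package's public API, only the getter itself appears in this hunk:
package main
import (
"fmt"
"log"
vultr "github.com/JamesClonk/vultr/lib"
)
func main() {
// NewClient and its second argument are assumed; the API key is a placeholder.
client := vultr.NewClient("YOUR-API-KEY", nil)
apps, err := client.GetApplications()
if err != nil {
log.Fatal(err)
}
// The list comes back sorted case-insensitively by name, as implemented above.
for _, app := range apps {
fmt.Printf("%s: %s (%s)\n", app.ID, app.Name, app.DeployName)
}
}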

View file

@ -4,7 +4,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// BlockStorage on Vultr account
@ -19,6 +21,25 @@ type BlockStorage struct {
AttachedTo string `json:"attached_to_SUBID"`
}
type blockstorages []BlockStorage
func (b blockstorages) Len() int { return len(b) }
func (b blockstorages) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b blockstorages) Less(i, j int) bool {
// sort order: name, size, status
if strings.ToLower(b[i].Name) < strings.ToLower(b[j].Name) {
return true
} else if strings.ToLower(b[i].Name) > strings.ToLower(b[j].Name) {
return false
}
if b[i].SizeGB < b[j].SizeGB {
return true
} else if b[i].SizeGB > b[j].SizeGB {
return false
}
return b[i].Status < b[j].Status
}
// UnmarshalJSON implements json.Unmarshaller on BlockStorage.
// This is needed because the Vultr API is inconsistent in its JSON responses.
// Some fields can change type, from JSON number to JSON string and vice-versa.
@ -87,6 +108,7 @@ func (c *Client) GetBlockStorages() (storages []BlockStorage, err error) {
if err := c.get(`block/list`, &storages); err != nil {
return nil, err
}
sort.Sort(blockstorages(storages))
return storages, nil
}
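The UnmarshalJSON comment above names the underlying problem: some Vultr fields arrive as either a JSON number or a JSON string. A standalone sketch of that normalization technique (generic decode, then fmt.Sprintf plus strconv), independent of this library and using made-up field names:
package main
import (
"encoding/json"
"fmt"
"strconv"
)
// storage mimics the idea behind BlockStorage.UnmarshalJSON: fields that may
// be numbers or strings are normalized after a generic decode.
type storage struct {
SizeGB int
Name   string
}
func (s *storage) UnmarshalJSON(data []byte) error {
var fields map[string]interface{}
if err := json.Unmarshal(data, &fields); err != nil {
return err
}
// fmt.Sprintf flattens both float64 and string values into a string,
// which strconv then parses.
size, err := strconv.Atoi(fmt.Sprintf("%v", fields["size_gb"]))
if err != nil {
return err
}
s.SizeGB = size
s.Name = fmt.Sprintf("%v", fields["label"])
return nil
}
func main() {
for _, doc := range []string{
`{"size_gb": 10, "label": "data"}`,
`{"size_gb": "10", "label": "data"}`,
} {
var st storage
if err := json.Unmarshal([]byte(doc), &st); err != nil {
fmt.Println("error:", err)
continue
}
fmt.Printf("%+v\n", st) // both inputs decode to the same struct
}
}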

View file

@ -18,7 +18,7 @@ import (
const (
// Version of this library
Version = "1.12.0"
Version = "1.13.0"
// APIVersion of Vultr
APIVersion = "v1"

View file

@ -3,6 +3,8 @@ package lib
import (
"fmt"
"net/url"
"sort"
"strings"
)
// DNSDomain represents a DNS domain on Vultr
@ -11,6 +13,14 @@ type DNSDomain struct {
Created string `json:"date_created"`
}
type dnsdomains []DNSDomain
func (d dnsdomains) Len() int { return len(d) }
func (d dnsdomains) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d dnsdomains) Less(i, j int) bool {
return strings.ToLower(d[i].Domain) < strings.ToLower(d[j].Domain)
}
// DNSRecord represents a DNS record on Vultr
type DNSRecord struct {
RecordID int `json:"RECORDID"`
@ -21,20 +31,41 @@ type DNSRecord struct {
TTL int `json:"ttl"`
}
type dnsrecords []DNSRecord
func (d dnsrecords) Len() int { return len(d) }
func (d dnsrecords) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d dnsrecords) Less(i, j int) bool {
// sort order: type, data, name
if d[i].Type < d[j].Type {
return true
} else if d[i].Type > d[j].Type {
return false
}
if d[i].Data < d[j].Data {
return true
} else if d[i].Data > d[j].Data {
return false
}
return strings.ToLower(d[i].Name) < strings.ToLower(d[j].Name)
}
// GetDNSDomains returns a list of available domains on Vultr account
func (c *Client) GetDNSDomains() (dnsdomains []DNSDomain, err error) {
if err := c.get(`dns/list`, &dnsdomains); err != nil {
func (c *Client) GetDNSDomains() (domains []DNSDomain, err error) {
if err := c.get(`dns/list`, &domains); err != nil {
return nil, err
}
return dnsdomains, nil
sort.Sort(dnsdomains(domains))
return domains, nil
}
// GetDNSRecords returns a list of all DNS records of a particular domain
func (c *Client) GetDNSRecords(domain string) (dnsrecords []DNSRecord, err error) {
if err := c.get(`dns/records?domain=`+domain, &dnsrecords); err != nil {
func (c *Client) GetDNSRecords(domain string) (records []DNSRecord, err error) {
if err := c.get(`dns/records?domain=`+domain, &records); err != nil {
return nil, err
}
return dnsrecords, nil
sort.Sort(dnsrecords(records))
return records, nil
}
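A usage sketch combining the two DNS getters above; NewClient is an assumption about the package's public API, and the output ordering relies on the sorting just added:
package main
import (
"fmt"
"log"
vultr "github.com/JamesClonk/vultr/lib"
)
func main() {
// NewClient is assumed; the API key is a placeholder.
client := vultr.NewClient("YOUR-API-KEY", nil)
domains, err := client.GetDNSDomains()
if err != nil {
log.Fatal(err)
}
// Domains are sorted case-insensitively; records by type, data, then name.
for _, d := range domains {
records, err := client.GetDNSRecords(d.Domain)
if err != nil {
log.Fatal(err)
}
fmt.Printf("%s: %d records\n", d.Domain, len(records))
}
}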
// CreateDNSDomain creates a new DNS domain name on Vultr

View file

@ -1,6 +1,9 @@
package lib
import "net/url"
import (
"net/url"
"sort"
)
// IPv4 information of a virtual machine
type IPv4 struct {
@ -11,6 +14,20 @@ type IPv4 struct {
ReverseDNS string `json:"reverse"`
}
type ipv4s []IPv4
func (s ipv4s) Len() int { return len(s) }
func (s ipv4s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ipv4s) Less(i, j int) bool {
// sort order: type, ip
if s[i].Type < s[j].Type {
return true
} else if s[i].Type > s[j].Type {
return false
}
return s[i].IP < s[j].IP
}
// IPv6 information of a virtual machine
type IPv6 struct {
IP string `json:"ip"`
@ -19,12 +36,32 @@ type IPv6 struct {
Type string `json:"type"`
}
type ipv6s []IPv6
func (s ipv6s) Len() int { return len(s) }
func (s ipv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ipv6s) Less(i, j int) bool {
// sort order: type, ip
if s[i].Type < s[j].Type {
return true
} else if s[i].Type > s[j].Type {
return false
}
return s[i].IP < s[j].IP
}
// ReverseDNSIPv6 information of a virtual machine
type ReverseDNSIPv6 struct {
IP string `json:"ip"`
ReverseDNS string `json:"reverse"`
}
type reverseDNSIPv6s []ReverseDNSIPv6
func (s reverseDNSIPv6s) Len() int { return len(s) }
func (s reverseDNSIPv6s) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s reverseDNSIPv6s) Less(i, j int) bool { return s[i].IP < s[j].IP }
// ListIPv4 lists the IPv4 information of a virtual machine
func (c *Client) ListIPv4(id string) (list []IPv4, err error) {
var ipMap map[string][]IPv4
@ -37,6 +74,7 @@ func (c *Client) ListIPv4(id string) (list []IPv4, err error) {
list = append(list, ip)
}
}
sort.Sort(ipv4s(list))
return list, nil
}
@ -52,6 +90,7 @@ func (c *Client) ListIPv6(id string) (list []IPv6, err error) {
list = append(list, ip)
}
}
sort.Sort(ipv6s(list))
return list, nil
}
@ -67,6 +106,7 @@ func (c *Client) ListIPv6ReverseDNS(id string) (list []ReverseDNSIPv6, err error
list = append(list, ip)
}
}
sort.Sort(reverseDNSIPv6s(list))
return list, nil
}

View file

@ -1,5 +1,10 @@
package lib
import (
"sort"
"strings"
)
// ISO image on Vultr
type ISO struct {
ID int `json:"ISOID"`
@ -9,6 +14,20 @@ type ISO struct {
MD5sum string `json:"md5sum"`
}
type isos []ISO
func (s isos) Len() int { return len(s) }
func (s isos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s isos) Less(i, j int) bool {
// sort order: filename, created
if strings.ToLower(s[i].Filename) < strings.ToLower(s[j].Filename) {
return true
} else if strings.ToLower(s[i].Filename) > strings.ToLower(s[j].Filename) {
return false
}
return s[i].Created < s[j].Created
}
// GetISO returns a list of all ISO images on Vultr account
func (c *Client) GetISO() ([]ISO, error) {
var isoMap map[string]ISO
@ -20,5 +39,6 @@ func (c *Client) GetISO() ([]ISO, error) {
for _, iso := range isoMap {
isoList = append(isoList, iso)
}
sort.Sort(isos(isoList))
return isoList, nil
}

View file

@ -1,5 +1,10 @@
package lib
import (
"sort"
"strings"
)
// OS image on Vultr
type OS struct {
ID int `json:"OSID"`
@ -10,6 +15,12 @@ type OS struct {
Surcharge string `json:"surcharge"`
}
type oses []OS
func (s oses) Len() int { return len(s) }
func (s oses) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s oses) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
// GetOS returns a list of all available operating systems on Vultr
func (c *Client) GetOS() ([]OS, error) {
var osMap map[string]OS
@ -21,5 +32,6 @@ func (c *Client) GetOS() ([]OS, error) {
for _, os := range osMap {
osList = append(osList, os)
}
sort.Sort(oses(osList))
return osList, nil
}

View file

@ -1,6 +1,11 @@
package lib
import "fmt"
import (
"fmt"
"sort"
"strconv"
"strings"
)
// Plan on Vultr
type Plan struct {
@ -14,6 +19,40 @@ type Plan struct {
Regions []int `json:"available_locations"`
}
type plans []Plan
func (p plans) Len() int { return len(p) }
func (p plans) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p plans) Less(i, j int) bool {
pa, _ := strconv.ParseFloat(strings.TrimSpace(p[i].Price), 64)
pb, _ := strconv.ParseFloat(strings.TrimSpace(p[j].Price), 64)
ra, _ := strconv.ParseInt(strings.TrimSpace(p[i].RAM), 10, 64)
rb, _ := strconv.ParseInt(strings.TrimSpace(p[j].RAM), 10, 64)
da, _ := strconv.ParseInt(strings.TrimSpace(p[i].Disk), 10, 64)
db, _ := strconv.ParseInt(strings.TrimSpace(p[j].Disk), 10, 64)
// sort order: price, vcpu, ram, disk
if pa < pb {
return true
} else if pa > pb {
return false
}
if p[i].VCpus < p[j].VCpus {
return true
} else if p[i].VCpus > p[j].VCpus {
return false
}
if ra < rb {
return true
} else if ra > rb {
return false
}
return da < db
}
// GetPlans returns a list of all available plans on Vultr account
func (c *Client) GetPlans() ([]Plan, error) {
var planMap map[string]Plan
@ -21,11 +60,13 @@ func (c *Client) GetPlans() ([]Plan, error) {
return nil, err
}
var planList []Plan
var p plans
for _, plan := range planMap {
planList = append(planList, plan)
p = append(p, plan)
}
return planList, nil
sort.Sort(plans(p))
return p, nil
}
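GetPlans now returns plans ordered by price, vCPU count, RAM, then disk, so the first matching entry is the cheapest. A sketch that exploits this ordering to find the cheapest plan offered in one region; NewClient, the API key and the region ID are assumptions or placeholders:
package main
import (
"fmt"
"log"
vultr "github.com/JamesClonk/vultr/lib"
)
func main() {
// NewClient is assumed; the API key and region ID are placeholders.
client := vultr.NewClient("YOUR-API-KEY", nil)
regionID := 9
plans, err := client.GetPlans()
if err != nil {
log.Fatal(err)
}
// Plans are already sorted cheapest-first, so the first plan that lists
// the region among its available locations wins.
for _, plan := range plans {
for _, r := range plan.Regions {
if r == regionID {
fmt.Printf("cheapest plan in region %d: %+v\n", regionID, plan)
return
}
}
}
fmt.Println("no plan available in region", regionID)
}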
// GetAvailablePlansForRegion returns available plans for specified region

View file

@ -1,5 +1,7 @@
package lib
import "sort"
// Region on Vultr
type Region struct {
ID int `json:"DCID,string"`
@ -12,6 +14,20 @@ type Region struct {
Code string `json:"regioncode"`
}
type regions []Region
func (s regions) Len() int { return len(s) }
func (s regions) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s regions) Less(i, j int) bool {
// sort order: continent, name
if s[i].Continent < s[j].Continent {
return true
} else if s[i].Continent > s[j].Continent {
return false
}
return s[i].Name < s[j].Name
}
// GetRegions returns a list of all available Vultr regions
func (c *Client) GetRegions() ([]Region, error) {
var regionMap map[string]Region
@ -23,5 +39,6 @@ func (c *Client) GetRegions() ([]Region, error) {
for _, os := range regionMap {
regionList = append(regionList, os)
}
sort.Sort(regions(regionList))
return regionList, nil
}

View file

@ -4,7 +4,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// IP on Vultr
@ -18,6 +20,25 @@ type IP struct {
AttachedTo string `json:"attached_SUBID,string"`
}
type ips []IP
func (s ips) Len() int { return len(s) }
func (s ips) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s ips) Less(i, j int) bool {
// sort order: label, iptype, subnet
if strings.ToLower(s[i].Label) < strings.ToLower(s[j].Label) {
return true
} else if strings.ToLower(s[i].Label) > strings.ToLower(s[j].Label) {
return false
}
if s[i].IPType < s[j].IPType {
return true
} else if s[i].IPType > s[j].IPType {
return false
}
return s[i].Subnet < s[j].Subnet
}
// UnmarshalJSON implements json.Unmarshaller on IP.
// This is needed because the Vultr API is inconsistent in its JSON responses.
// Some fields can change type, from JSON number to JSON string and vice-versa.
@ -89,11 +110,12 @@ func (c *Client) ListReservedIP() ([]IP, error) {
return nil, err
}
ips := make([]IP, 0)
ipList := make([]IP, 0)
for _, ip := range ipMap {
ips = append(ips, ip)
ipList = append(ipList, ip)
}
return ips, nil
sort.Sort(ips(ipList))
return ipList, nil
}
// GetReservedIP returns reserved IP with given ID

View file

@ -4,6 +4,8 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strings"
)
// StartupScript on Vultr account
@ -14,6 +16,14 @@ type StartupScript struct {
Content string `json:"script"`
}
type startupscripts []StartupScript
func (s startupscripts) Len() int { return len(s) }
func (s startupscripts) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s startupscripts) Less(i, j int) bool {
return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name)
}
// UnmarshalJSON implements json.Unmarshaller on StartupScript.
// Necessary because the SCRIPTID field has inconsistent types.
func (s *StartupScript) UnmarshalJSON(data []byte) (err error) {
@ -47,6 +57,7 @@ func (c *Client) GetStartupScripts() (scripts []StartupScript, err error) {
}
scripts = append(scripts, script)
}
sort.Sort(startupscripts(scripts))
return scripts, nil
}

View file

@ -5,7 +5,9 @@ import (
"encoding/json"
"fmt"
"net/url"
"sort"
"strconv"
"strings"
)
// Server (virtual machine) on Vultr account
@ -36,6 +38,8 @@ type Server struct {
KVMUrl string `json:"kvm_url"`
AutoBackups string `json:"auto_backups"`
Tag string `json:"tag"`
OSID string `json:"OSID"`
AppID string `json:"APPID"`
}
// ServerOptions are optional parameters to be used during server creation
@ -52,6 +56,21 @@ type ServerOptions struct {
DontNotifyOnActivate bool
Hostname string
Tag string
AppID string
}
type servers []Server
func (s servers) Len() int { return len(s) }
func (s servers) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s servers) Less(i, j int) bool {
// sort order: name, ip
if strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) {
return true
} else if strings.ToLower(s[i].Name) > strings.ToLower(s[j].Name) {
return false
}
return s[i].MainIP < s[j].MainIP
}
// V6Network represents an IPv6 network of a Vultr server
@ -140,6 +159,18 @@ func (s *Server) UnmarshalJSON(data []byte) (err error) {
}
s.AllowedBandwidth = ab
value = fmt.Sprintf("%v", fields["OSID"])
if value == "<nil>" {
value = ""
}
s.OSID = value
value = fmt.Sprintf("%v", fields["APPID"])
if value == "<nil>" {
value = ""
}
s.AppID = value
s.ID = fmt.Sprintf("%v", fields["SUBID"])
s.Name = fmt.Sprintf("%v", fields["label"])
s.OS = fmt.Sprintf("%v", fields["os"])
@ -180,29 +211,31 @@ func (s *Server) UnmarshalJSON(data []byte) (err error) {
}
// GetServers returns a list of current virtual machines on Vultr account
func (c *Client) GetServers() (servers []Server, err error) {
func (c *Client) GetServers() (serverList []Server, err error) {
var serverMap map[string]Server
if err := c.get(`server/list`, &serverMap); err != nil {
return nil, err
}
for _, server := range serverMap {
servers = append(servers, server)
serverList = append(serverList, server)
}
return servers, nil
sort.Sort(servers(serverList))
return serverList, nil
}
// GetServersByTag returns a list of all virtual machines matching by tag
func (c *Client) GetServersByTag(tag string) (servers []Server, err error) {
func (c *Client) GetServersByTag(tag string) (serverList []Server, err error) {
var serverMap map[string]Server
if err := c.get(`server/list?tag=`+tag, &serverMap); err != nil {
return nil, err
}
for _, server := range serverMap {
servers = append(servers, server)
serverList = append(serverList, server)
}
return servers, nil
sort.Sort(servers(serverList))
return serverList, nil
}
// GetServer returns the virtual machine with the given ID
@ -274,6 +307,10 @@ func (c *Client) CreateServer(name string, regionID, planID, osID int, options *
if options.Tag != "" {
values.Add("tag", options.Tag)
}
if options.AppID != "" {
values.Add("APPID", options.AppID)
}
}
var server Server
@ -371,6 +408,7 @@ func (c *Client) ListOSforServer(id string) (os []OS, err error) {
for _, o := range osMap {
os = append(os, o)
}
sort.Sort(oses(os))
return os, nil
}
@ -446,3 +484,30 @@ func (c *Client) BandwidthOfServer(id string) (bandwidth []map[string]string, er
return bandwidth, nil
}
// ChangeApplicationofServer changes the virtual machine to a different application
func (c *Client) ChangeApplicationofServer(id string, appID string) error {
values := url.Values{
"SUBID": {id},
"APPID": {appID},
}
if err := c.post(`server/app_change`, values, nil); err != nil {
return err
}
return nil
}
// ListApplicationsforServer lists all available applications to which an existing virtual machine can be changed
func (c *Client) ListApplicationsforServer(id string) (apps []Application, err error) {
var appMap map[string]Application
if err := c.get(`server/app_change_list?SUBID=`+id, &appMap); err != nil {
return nil, err
}
for _, app := range appMap {
apps = append(apps, app)
}
sort.Sort(applications(apps))
return apps, nil
}
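A sketch tying the two new calls together: list the applications an existing server can be switched to, then perform the change. NewClient, the API key and the server ID are assumptions or placeholders:
package main
import (
"fmt"
"log"
vultr "github.com/JamesClonk/vultr/lib"
)
func main() {
// NewClient is assumed from the package API; the API key and server ID
// are placeholders.
client := vultr.NewClient("YOUR-API-KEY", nil)
serverID := "576965"
apps, err := client.ListApplicationsforServer(serverID)
if err != nil {
log.Fatal(err)
}
if len(apps) == 0 {
fmt.Println("no applications available for this server")
return
}
// Switch the server to the first application in the (sorted) list.
if err := client.ChangeApplicationofServer(serverID, apps[0].ID); err != nil {
log.Fatal(err)
}
fmt.Printf("server %s switching to %s\n", serverID, apps[0].Name)
}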

View file

@ -1,6 +1,10 @@
package lib
import "net/url"
import (
"net/url"
"sort"
"strings"
)
// Snapshot of a virtual machine on Vultr account
type Snapshot struct {
@ -11,17 +15,32 @@ type Snapshot struct {
Created string `json:"date_created"`
}
type snapshots []Snapshot
func (s snapshots) Len() int { return len(s) }
func (s snapshots) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s snapshots) Less(i, j int) bool {
// sort order: description, created
if strings.ToLower(s[i].Description) < strings.ToLower(s[j].Description) {
return true
} else if strings.ToLower(s[i].Description) > strings.ToLower(s[j].Description) {
return false
}
return s[i].Created < s[j].Created
}
// GetSnapshots retrieves a list of all snapshots on Vultr account
func (c *Client) GetSnapshots() (snapshots []Snapshot, err error) {
func (c *Client) GetSnapshots() (snapshotList []Snapshot, err error) {
var snapshotMap map[string]Snapshot
if err := c.get(`snapshot/list`, &snapshotMap); err != nil {
return nil, err
}
for _, snapshot := range snapshotMap {
snapshots = append(snapshots, snapshot)
snapshotList = append(snapshotList, snapshot)
}
return snapshots, nil
sort.Sort(snapshots(snapshotList))
return snapshotList, nil
}
// CreateSnapshot creates a new virtual machine snapshot

View file

@ -1,6 +1,10 @@
package lib
import "net/url"
import (
"net/url"
"sort"
"strings"
)
// SSHKey on Vultr account
type SSHKey struct {
@ -10,6 +14,12 @@ type SSHKey struct {
Created string `json:"date_created"`
}
type sshkeys []SSHKey
func (s sshkeys) Len() int { return len(s) }
func (s sshkeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sshkeys) Less(i, j int) bool { return strings.ToLower(s[i].Name) < strings.ToLower(s[j].Name) }
// GetSSHKeys returns a list of SSHKeys from Vultr account
func (c *Client) GetSSHKeys() (keys []SSHKey, err error) {
var keyMap map[string]SSHKey
@ -20,6 +30,7 @@ func (c *Client) GetSSHKeys() (keys []SSHKey, err error) {
for _, key := range keyMap {
keys = append(keys, key)
}
sort.Sort(sshkeys(keys))
return keys, nil
}

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (
@ -83,7 +85,7 @@ func RunWithPrivileges(names []string, fn func() error) error {
return err
}
defer releaseThreadToken(token)
err = adjustPrivileges(token, privileges)
err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED)
if err != nil {
return err
}
@ -110,6 +112,15 @@ func mapPrivileges(names []string) ([]uint64, error) {
// EnableProcessPrivileges enables privileges globally for the process.
func EnableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
}
// DisableProcessPrivileges disables privileges globally for the process.
func DisableProcessPrivileges(names []string) error {
return enableDisableProcessPrivilege(names, 0)
}
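A minimal sketch of how these helpers are typically combined: RunWithPrivileges (shown earlier in this file) enables a named privilege on the current thread only while the callback runs, through the adjustPrivileges(..., SE_PRIVILEGE_ENABLED) path above. The privilege name used here is the standard Windows backup privilege:
// +build windows
package main
import (
"log"
winio "github.com/Microsoft/go-winio"
)
func main() {
// The privilege is enabled on the calling thread only for the duration
// of the callback and released afterwards.
err := winio.RunWithPrivileges([]string{"SeBackupPrivilege"}, func() error {
// Privileged work (e.g. opening files with backup semantics) goes here.
return nil
})
if err != nil {
log.Fatal(err)
}
}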
func enableDisableProcessPrivilege(names []string, action uint32) error {
privileges, err := mapPrivileges(names)
if err != nil {
return err
@ -123,15 +134,15 @@ func EnableProcessPrivileges(names []string) error {
}
defer token.Close()
return adjustPrivileges(token, privileges)
return adjustPrivileges(token, privileges, action)
}
func adjustPrivileges(token windows.Token, privileges []uint64) error {
func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
var b bytes.Buffer
binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
for _, p := range privileges {
binary.Write(&b, binary.LittleEndian, p)
binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
binary.Write(&b, binary.LittleEndian, action)
}
prevState := make([]byte, b.Len())
reqSize := uint32(0)

View file

@ -1,3 +1,5 @@
// +build windows
package winio
import (

View file

@ -1,3 +1,3 @@
package winio
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go

View file

@ -12,9 +12,9 @@ import (
var _ unsafe.Pointer
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
modwinmm = syscall.NewLazyDLL("winmm.dll")
modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modwinmm = windows.NewLazySystemDLL("winmm.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")

View file

@ -4,6 +4,7 @@ import (
"bufio"
"compress/gzip"
"fmt"
"io"
"net"
"net/http"
"strconv"
@ -21,10 +22,16 @@ const (
type codings map[string]float64
// The default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
const DEFAULT_QVALUE = 1.0
const (
// DefaultQValue is the default qvalue to assign to an encoding if no explicit qvalue is set.
// This is actually kind of ambiguous in RFC 2616, so hopefully it's correct.
// The examples seem to indicate that it is.
DefaultQValue = 1.0
// DefaultMinSize defines the minimum size to reach to enable compression.
// It's 512 bytes.
DefaultMinSize = 512
)
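A minimal sketch of putting this middleware in front of a handler, assuming the package's exported GzipHandler wrapper (only the level/min-size constructors appear in this hunk). Responses shorter than DefaultMinSize (512 bytes) are passed through uncompressed by the buffering logic described below:
package main
import (
"fmt"
"log"
"net/http"
"github.com/NYTimes/gziphandler"
)
func main() {
hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Bodies under DefaultMinSize bytes are written out uncompressed;
// longer ones are gzipped transparently.
fmt.Fprintln(w, "hello, world")
})
// GzipHandler wraps with the default compression level and minimum size.
http.Handle("/", gziphandler.GzipHandler(hello))
log.Fatal(http.ListenAndServe(":8080", nil))
}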
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
// gzip.Writers. Use poolIndex to convert a compression level to an index into
@ -63,35 +70,88 @@ func addLevelPool(level int) {
// GzipResponseWriter provides an http.ResponseWriter interface, which gzips
// bytes before writing them to the underlying response. This doesn't close the
// writers, so don't forget to do that.
// It can be configured to skip responses smaller than minSize.
type GzipResponseWriter struct {
http.ResponseWriter
index int // Index for gzipWriterPools.
gw *gzip.Writer
code int // Saves the WriteHeader value.
minSize int // Specifies the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
}
// Write appends data to the gzip writer.
func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// Lazily create the gzip.Writer, this allows empty bodies to be actually
// empty, for example in the case of status code 204 (no content).
if w.gw == nil {
w.init()
}
// If content type is not set.
if _, ok := w.Header()[contentType]; !ok {
// If content type is not set, infer it from the uncompressed body.
// It infers it from the uncompressed body.
w.Header().Set(contentType, http.DetectContentType(b))
}
return w.gw.Write(b)
// GZIP responseWriter is initialized. Use the GZIP responseWriter.
if w.gw != nil {
n, err := w.gw.Write(b)
return n, err
}
// Save the write into a buffer for later use in GZIP responseWriter (if content is long enough) or at close with regular responseWriter.
w.buf = append(w.buf, b...)
// Once the accumulated writes exceed minSize, compression is enabled.
if len(w.buf) >= w.minSize {
err := w.startGzip()
if err != nil {
return 0, err
}
}
return len(b), nil
}
// WriteHeader will check if the gzip writer needs to be lazily initiated and
// then pass the code along to the underlying ResponseWriter.
func (w *GzipResponseWriter) WriteHeader(code int) {
if w.gw == nil &&
code != http.StatusNotModified && code != http.StatusNoContent {
w.init()
// startGzip initializes any GZIP-specific information.
func (w *GzipResponseWriter) startGzip() error {
// Set the GZIP header.
w.Header().Set(contentEncoding, "gzip")
// if the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since it's already set
// See: https://github.com/golang/go/issues/14975.
w.Header().Del(contentLength)
// Write the header to gzip response.
w.writeHeader()
// Initialize the GZIP response.
w.init()
// Flush the buffer into the gzip response.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
// accept the entire buffer but returned no specific error, we have no clue
// what's going on, so abort just to be safe.
if err == nil && n < len(w.buf) {
return io.ErrShortWrite
}
w.ResponseWriter.WriteHeader(code)
w.buf = nil
return err
}
// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
w.code = code
}
// writeHeader uses the saved code to send it to the ResponseWriter.
func (w *GzipResponseWriter) writeHeader() {
if w.code == 0 {
w.code = http.StatusOK
}
w.ResponseWriter.WriteHeader(w.code)
}
// init grabs a new gzip writer from the gzipWriterPool and writes the correct
@ -102,21 +162,29 @@ func (w *GzipResponseWriter) init() {
gzw := gzipWriterPools[w.index].Get().(*gzip.Writer)
gzw.Reset(w.ResponseWriter)
w.gw = gzw
w.ResponseWriter.Header().Set(contentEncoding, "gzip")
// if the Content-Length is already set, then calls to Write on gzip
// will fail to set the Content-Length header since its already set
// See: https://github.com/golang/go/issues/14975
w.ResponseWriter.Header().Del(contentLength)
}
// Close will close the gzip.Writer and will put it back in the gzipWriterPool.
func (w *GzipResponseWriter) Close() error {
// Buffer not nil means the regular response must be returned.
if w.buf != nil {
w.writeHeader()
// Make the write into the regular response.
_, writeErr := w.ResponseWriter.Write(w.buf)
// Return the write error, if any.
if writeErr != nil {
return fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", writeErr.Error())
}
}
// If the GZIP responseWriter is not set there is no need to close it.
if w.gw == nil {
return nil
}
err := w.gw.Close()
gzipWriterPools[w.index].Put(w.gw)
w.gw = nil
return err
}
@ -162,9 +230,18 @@ func MustNewGzipLevelHandler(level int) func(http.Handler) http.Handler {
// if an invalid gzip compression level is given, so if one can ensure the level
// is valid, the returned error can be safely ignored.
func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
return NewGzipLevelAndMinSize(level, DefaultMinSize)
}
// NewGzipLevelAndMinSize behaves as NewGzipLevelHandler except it lets the caller
// specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
return nil, fmt.Errorf("invalid compression level requested: %d", level)
}
if minSize < 0 {
return nil, fmt.Errorf("minimum size must be more than zero")
}
return func(h http.Handler) http.Handler {
index := poolIndex(level)
@ -175,6 +252,9 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
minSize: minSize,
buf: []byte{},
}
defer gw.Close()
@ -237,7 +317,7 @@ func parseEncodings(s string) (codings, error) {
func parseCoding(s string) (coding string, qvalue float64, err error) {
for n, part := range strings.Split(s, ";") {
part = strings.TrimSpace(part)
qvalue = DEFAULT_QVALUE
qvalue = DefaultQValue
if n == 0 {
coding = strings.ToLower(part)

43
vendor/github.com/NYTimes/gziphandler/gzip_go18.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
// +build go1.8
package gziphandler
import "net/http"
// Push initiates an HTTP/2 server push.
// Push returns ErrNotSupported if the client has disabled push or if push
// is not supported on the underlying connection.
func (w *GzipResponseWriter) Push(target string, opts *http.PushOptions) error {
pusher, ok := w.ResponseWriter.(http.Pusher)
if ok && pusher != nil {
return pusher.Push(target, setAcceptEncodingForPushOptions(opts))
}
return http.ErrNotSupported
}
// setAcceptEncodingForPushOptions sets "Accept-Encoding" : "gzip" for PushOptions without overriding existing headers.
func setAcceptEncodingForPushOptions(opts *http.PushOptions) *http.PushOptions {
if opts == nil {
opts = &http.PushOptions{
Header: http.Header{
acceptEncoding: []string{"gzip"},
},
}
return opts
}
if opts.Header == nil {
opts.Header = http.Header{
acceptEncoding: []string{"gzip"},
}
return opts
}
if encoding := opts.Header.Get(acceptEncoding); encoding == "" {
opts.Header.Add(acceptEncoding, "gzip")
return opts
}
return opts
}
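On Go 1.8 and newer, a handler running behind this writer can trigger an HTTP/2 server push; the GzipResponseWriter forwards the call to the underlying http.Pusher and injects Accept-Encoding: gzip, as shown above. A sketch with placeholder asset and certificate paths:
package main
import (
"fmt"
"log"
"net/http"
)
func handler(w http.ResponseWriter, r *http.Request) {
// If the wrapped connection supports HTTP/2 push, pre-push the stylesheet.
if pusher, ok := w.(http.Pusher); ok {
if err := pusher.Push("/static/app.css", nil); err != nil {
// Push is best effort; fall back to normal serving.
log.Println("push failed:", err)
}
}
fmt.Fprintln(w, `<html><head><link rel="stylesheet" href="/static/app.css"></head></html>`)
}
func main() {
http.HandleFunc("/", handler)
// HTTP/2 (and therefore Push) requires TLS with Go's standard server;
// the certificate paths are placeholders.
log.Fatal(http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil))
}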

View file

@ -1,7 +1,7 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
// https://github.com/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//

View file

@ -3,11 +3,21 @@ package logrus
import (
"bytes"
"fmt"
"io"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
@ -29,6 +39,9 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), a Buffer may be set to the entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
@ -39,21 +52,15 @@ func NewEntry(logger *Logger) *Entry {
}
}
// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
return bytes.NewBuffer(serialized), err
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
reader, err := entry.Reader()
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
return reader.String(), err
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
@ -81,6 +88,7 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
@ -90,20 +98,23 @@ func (entry Entry) log(level Level, msg string) {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
reader, err := entry.Reader()
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
}
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
_, err = io.Copy(entry.Logger.Out, reader)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for

View file

@ -5,9 +5,40 @@ import (
"fmt"
)
type fieldKey string
type FieldMap map[fieldKey]string
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for various fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyLevel: "@message",
// },
// }
FieldMap FieldMap
}
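A short sketch wiring the new FieldMap option into a logger; the remapped key names mirror the example in the struct comment, and the import path matches the Sirupsen casing vendored here:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
// Remap the standard keys so downstream log shippers see
// @timestamp/@level/@message instead of time/level/msg.
log.SetFormatter(&log.JSONFormatter{
FieldMap: log.FieldMap{
log.FieldKeyTime:  "@timestamp",
log.FieldKeyLevel: "@level",
log.FieldKeyMsg:   "@message",
},
})
log.WithField("component", "example").Info("formatter configured")
}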
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
@ -29,9 +60,11 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat = DefaultTimestampFormat
}
data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {

View file

@ -26,8 +26,31 @@ type Logger struct {
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
Level Level
// Used to sync writing to the log.
mu sync.Mutex
// Used to sync writing to the log. Locking is enabled by default
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
@ -51,162 +74,235 @@ func New() *Logger {
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
return NewEntry(logger).WithError(err)
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugf(format, args...)
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infof(format, args...)
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
NewEntry(logger).Printf(format, args...)
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnf(format, args...)
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorf(format, args...)
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicf(format, args...)
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debug(args...)
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Info(args...)
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
NewEntry(logger).Info(args...)
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warn(args...)
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Error(args...)
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panic(args...)
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
NewEntry(logger).Debugln(args...)
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
NewEntry(logger).Infoln(args...)
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
NewEntry(logger).Println(args...)
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
NewEntry(logger).Warnln(args...)
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
NewEntry(logger).Errorln(args...)
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
NewEntry(logger).Panicln(args...)
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When a file is opened in append mode, it's safe to
// write to it concurrently (within a 4k message on Linux).
// In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}
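A sketch of the append-mode scenario the comment describes: when the output is a file opened with O_APPEND, the per-write mutex can be skipped. The log file path is a placeholder:
package main
import (
"os"
log "github.com/Sirupsen/logrus"
)
func main() {
// O_APPEND writes within the 4k limit mentioned above are atomic on Linux,
// which is the case SetNoLock is meant for; the path is a placeholder.
f, err := os.OpenFile("/var/log/app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
if err != nil {
log.Fatal(err)
}
defer f.Close()
logger := log.New()
logger.Out = f
logger.SetNoLock() // skip the internal mutex around each write
logger.Info("logging without the write lock")
}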

View file

@ -0,0 +1,10 @@
// +build appengine
package logrus
import "io"
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal(f io.Writer) bool {
return true
}

View file

@ -1,4 +1,5 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus

View file

@ -3,6 +3,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"

View file

@ -4,18 +4,25 @@
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
func IsTerminal(f io.Writer) bool {
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
switch v := f.(type) {
case *os.File:
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
default:
return false
}
}

View file

@ -1,15 +1,21 @@
// +build solaris
// +build solaris,!appengine
package logrus
import (
"io"
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
return err == nil
default:
return false
}
}

View file

@ -3,11 +3,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows
// +build windows,!appengine
package logrus
import (
"io"
"os"
"syscall"
"unsafe"
)
@ -19,9 +21,13 @@ var (
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
func IsTerminal(f io.Writer) bool {
switch v := f.(type) {
case *os.File:
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
default:
return false
}
}

View file

@ -3,9 +3,9 @@ package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"sync"
"time"
)
@ -20,16 +20,10 @@ const (
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
@ -54,10 +48,32 @@ type TextFormatter struct {
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
// QuoteEmptyFields will wrap empty fields in quotes if true
QuoteEmptyFields bool
// QuoteCharacter can be set to override the default quoting character "
// with something else. For example: ', or `.
QuoteCharacter string
// Whether the logger's out is to a terminal
isTerminal bool
sync.Once
}
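A sketch configuring the new TextFormatter options added above; the values are illustrative:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.SetFormatter(&log.TextFormatter{
ForceColors:      true,
QuoteEmptyFields: true, // emit key='' instead of a bare key=
QuoteCharacter:   "'",  // quote with ' instead of the default "
})
log.WithField("empty", "").Warn("text formatter options")
}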
func (f *TextFormatter) init(entry *Entry) {
if len(f.QuoteCharacter) == 0 {
f.QuoteCharacter = "\""
}
if entry.Logger != nil {
f.isTerminal = IsTerminal(entry.Logger.Out)
}
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var keys []string = make([]string, 0, len(entry.Data))
var b *bytes.Buffer
keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
@ -65,13 +81,17 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
if !f.DisableSorting {
sort.Strings(keys)
}
b := &bytes.Buffer{}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
f.Do(func() { f.init(entry) })
isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
@ -111,18 +131,24 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText := strings.ToUpper(entry.Level.String())[0:4]
if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func needsQuoting(text string) bool {
func (f *TextFormatter) needsQuoting(text string) bool {
if f.QuoteEmptyFields && len(text) == 0 {
return true
}
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
@ -138,24 +164,26 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !needsQuoting(value) {
if !f.needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
if !f.needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)
fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
fmt.Fprint(b, value)
}
b.WriteByte(' ')
}

View file

@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter {
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
return NewEntry(logger).WriterLevel(level)
}
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
printFunc = entry.Debug
case InfoLevel:
printFunc = logger.Info
printFunc = entry.Info
case WarnLevel:
printFunc = logger.Warn
printFunc = entry.Warn
case ErrorLevel:
printFunc = logger.Error
printFunc = entry.Error
case FatalLevel:
printFunc = logger.Fatal
printFunc = entry.Fatal
case PanicLevel:
printFunc = logger.Panic
printFunc = entry.Panic
default:
printFunc = logger.Print
printFunc = entry.Print
}
go logger.writerScanner(reader, printFunc)
go entry.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
entry.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}

View file

@ -1,7 +1,10 @@
package auth
import "encoding/csv"
import "os"
import (
"encoding/csv"
"os"
"sync"
)
/*
SecretProvider is used by authenticators. Takes user name and realm
@ -20,6 +23,7 @@ type File struct {
Info os.FileInfo
/* must be set in inherited types during initialization */
Reload func()
mu sync.Mutex
}
func (f *File) ReloadIfNeeded() {
@ -27,6 +31,8 @@ func (f *File) ReloadIfNeeded() {
if err != nil {
panic(err)
}
f.mu.Lock()
defer f.mu.Unlock()
if f.Info == nil || f.Info.ModTime() != info.ModTime() {
f.Info = info
f.Reload()
@ -40,6 +46,7 @@ func (f *File) ReloadIfNeeded() {
type HtdigestFile struct {
File
Users map[string]map[string]string
mu sync.RWMutex
}
func reload_htdigest(hf *HtdigestFile) {
@ -57,6 +64,8 @@ func reload_htdigest(hf *HtdigestFile) {
panic(err)
}
hf.mu.Lock()
defer hf.mu.Unlock()
hf.Users = make(map[string]map[string]string)
for _, record := range records {
_, exists := hf.Users[record[1]]
@ -77,6 +86,8 @@ func HtdigestFileProvider(filename string) SecretProvider {
hf.Reload = func() { reload_htdigest(hf) }
return func(user, realm string) string {
hf.ReloadIfNeeded()
hf.mu.RLock()
defer hf.mu.RUnlock()
_, exists := hf.Users[realm]
if !exists {
return ""
@ -96,6 +107,7 @@ func HtdigestFileProvider(filename string) SecretProvider {
type HtpasswdFile struct {
File
Users map[string]string
mu sync.RWMutex
}
func reload_htpasswd(h *HtpasswdFile) {
@ -113,6 +125,8 @@ func reload_htpasswd(h *HtpasswdFile) {
panic(err)
}
h.mu.Lock()
defer h.mu.Unlock()
h.Users = make(map[string]string)
for _, record := range records {
h.Users[record[0]] = record[1]
@ -129,7 +143,9 @@ func HtpasswdFileProvider(filename string) SecretProvider {
h.Reload = func() { reload_htpasswd(h) }
return func(user, realm string) string {
h.ReloadIfNeeded()
h.mu.RLock()
password, exists := h.Users[user]
h.mu.RUnlock()
if !exists {
return ""
}
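A usage sketch for the providers above, wiring an htpasswd file into HTTP basic auth. NewBasicAuthenticator, Wrap and AuthenticatedRequest belong to this package's public API but do not appear in this hunk, so treat the exact wiring as an assumption; the file path is a placeholder:
package main
import (
"fmt"
"log"
"net/http"
auth "github.com/abbot/go-http-auth"
)
func hello(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
fmt.Fprintf(w, "hello, %s\n", r.Username)
}
func main() {
// The provider re-reads the file whenever its mtime changes, guarded by
// the mutexes added above.
secrets := auth.HtpasswdFileProvider("/etc/traefik/.htpasswd")
authenticator := auth.NewBasicAuthenticator("example.com", secrets)
http.HandleFunc("/", authenticator.Wrap(hello))
log.Fatal(http.ListenAndServe(":8080", nil))
}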

20
vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
View file

@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
if l == 0 {
return 0
}
i := int(float64(l) * q)
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}

View file

@ -5,3 +5,6 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -5,3 +5,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -1,7 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View file

@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path+lockExt)
os.Remove(db.path + lockExt)
return err
}

View file

@ -130,9 +130,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable {
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
@ -167,9 +175,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
@ -329,6 +336,28 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
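The new Sequence/SetSequence pair exposes the bucket's autoincrement counter, e.g. to seed it after importing existing data. A hedged usage sketch, assuming the usual github.com/boltdb/bolt import path and a throwaway database file:

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("/tmp/seq.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		// Seed the counter so future ids start above the imported ones.
		if err := b.SetSequence(1000); err != nil {
			return err
		}
		next, err := b.NextSequence() // 1001
		if err != nil {
			return err
		}
		fmt.Println("sequence:", b.Sequence(), "next id:", next)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}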

View file

@ -552,7 +552,10 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction.
for i, t := range db.txs {
if t == tx {
db.txs = append(db.txs[:i], db.txs[i+1:]...)
last := len(db.txs) - 1
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
break
}
}
@ -952,7 +955,7 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse
diff.TxN = other.TxN - s.TxN
diff.TxN = s.TxN - other.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff
}
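removeTx now deletes the transaction by moving the last element into its slot instead of re-slicing with append, which avoids copying the tail and nils the vacated entry so the *Tx can be garbage-collected; the slice is an unordered set, so losing order is fine. (The same file also fixes the sign of diff.TxN in Stats.Sub.) A small generic sketch of the same swap-remove idiom:

// removePtr deletes p from s without preserving order: the last element is
// swapped into p's slot and the freed tail slot is nil'd for the GC.
func removePtr(s []*int, p *int) []*int {
	for i, v := range s {
		if v == p {
			last := len(s) - 1
			s[i] = s[last]
			s[last] = nil
			return s[:last]
		}
	}
	return s
}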

View file

@ -24,7 +24,12 @@ func newFreelist() *freelist {
// size returns the size of the page after serialization.
func (f *freelist) size() int {
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
@ -46,16 +51,15 @@ func (f *freelist) pending_count() int {
return count
}
// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
m := make(pgids, 0)
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
return pgids(f.ids).merge(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
@ -166,12 +170,16 @@ func (f *freelist) read(p *page) {
}
// Copy the list of page ids from the freelist.
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
@ -182,20 +190,22 @@ func (f *freelist) read(p *page) {
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
ids := f.all()
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
if len(ids) < 0xFFFF {
p.count = uint16(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
@ -230,7 +240,7 @@ func (f *freelist) reload(p *page) {
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool)
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}

View file

@ -201,6 +201,11 @@ func (n *node) write(p *page) {
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {

View file

@ -62,6 +62,9 @@ func (p *page) leafPageElement(index uint16) *leafPageElement {
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -72,6 +75,9 @@ func (p *page) branchPageElement(index uint16) *branchPageElement {
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
@ -139,12 +145,33 @@ func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
} else if len(b) == 0 {
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// Create a list to hold all elements from both lists.
merged := make(pgids, 0, len(a)+len(b))
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
@ -166,7 +193,5 @@ func (a pgids) merge(b pgids) pgids {
}
// Append what's left in follow.
merged = append(merged, follow...)
return merged
_ = append(merged, follow...)
}

View file

@ -381,7 +381,9 @@ func (tx *Tx) Check() <-chan error {
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
for _, id := range tx.db.freelist.all() {
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}

60 additions: vendor/github.com/cenk/backoff/context.go (generated, vendored, new file)

View file

@ -0,0 +1,60 @@
package backoff
import (
"time"
"golang.org/x/net/context"
)
// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface {
BackOff
Context() context.Context
}
type backOffContext struct {
BackOff
ctx context.Context
}
// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext {
if ctx == nil {
panic("nil context")
}
if b, ok := b.(*backOffContext); ok {
return &backOffContext{
BackOff: b.BackOff,
ctx: ctx,
}
}
return &backOffContext{
BackOff: b,
ctx: ctx,
}
}
func ensureContext(b BackOff) BackOffContext {
if cb, ok := b.(BackOffContext); ok {
return cb
}
return WithContext(b, context.Background())
}
func (b *backOffContext) Context() context.Context {
return b.ctx
}
func (b *backOffContext) NextBackOff() time.Duration {
select {
case <-b.Context().Done():
return Stop
default:
return b.BackOff.NextBackOff()
}
}
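BackOffContext wires a context into a backoff policy so a retry loop stops as soon as the context is canceled. A hedged usage sketch, assuming the vendored import path github.com/cenk/backoff and the golang.org/x/net/context package used above:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/cenk/backoff"
	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// NextBackOff returns Stop once ctx is done, ending the retries.
	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)

	op := func() error {
		return errors.New("still failing") // pretend the remote call failed
	}

	if err := backoff.Retry(op, b); err != nil {
		fmt.Println("gave up:", err)
	}
}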

View file

@ -89,11 +89,6 @@ func NewExponentialBackOff() *ExponentialBackOff {
MaxElapsedTime: DefaultMaxElapsedTime,
Clock: SystemClock,
}
if b.RandomizationFactor < 0 {
b.RandomizationFactor = 0
} else if b.RandomizationFactor > 1 {
b.RandomizationFactor = 1
}
b.Reset()
return b
}

View file

@ -17,6 +17,9 @@ type Notify func(error, time.Duration)
// o is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) }
@ -27,12 +30,18 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
var err error
var next time.Duration
cb := ensureContext(b)
b.Reset()
for {
if err = operation(); err == nil {
return nil
}
if permanent, ok := err.(*PermanentError); ok {
return permanent.Err
}
if next = b.NextBackOff(); next == Stop {
return err
}
@ -41,6 +50,29 @@ func RetryNotify(operation Operation, b BackOff, notify Notify) error {
notify(err, next)
}
time.Sleep(next)
t := time.NewTimer(next)
select {
case <-cb.Context().Done():
t.Stop()
return err
case <-t.C:
}
}
}
// PermanentError signals that the operation should not be retried.
type PermanentError struct {
Err error
}
func (e *PermanentError) Error() string {
return e.Err.Error()
}
// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) *PermanentError {
return &PermanentError{
Err: err,
}
}
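Wrapping an error with Permanent short-circuits the retry loop: RetryNotify returns the wrapped error immediately instead of backing off again. A self-contained sketch (same assumed import path):

package main

import (
	"fmt"

	"github.com/cenk/backoff"
)

func main() {
	attempts := 0
	op := func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure %d", attempts) // retried with backoff
		}
		// A failure that retrying cannot fix: stop immediately.
		return backoff.Permanent(fmt.Errorf("bad request"))
	}

	// Returns the unwrapped "bad request" error on the third attempt.
	err := backoff.Retry(op, backoff.NewExponentialBackOff())
	fmt.Println("stopped with:", err)
}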

View file

@ -13,7 +13,7 @@ import (
type Ticker struct {
C <-chan time.Time
c chan time.Time
b BackOff
b BackOffContext
stop chan struct{}
stopOnce sync.Once
}
@ -26,7 +26,7 @@ func NewTicker(b BackOff) *Ticker {
t := &Ticker{
C: c,
c: c,
b: b,
b: ensureContext(b),
stop: make(chan struct{}),
}
go t.run()
@ -58,6 +58,8 @@ func (t *Ticker) run() {
case <-t.stop:
t.c = nil // Prevent future ticks from being sent to the channel.
return
case <-t.b.Context().Done():
return
}
}
}
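The Ticker now also stops once the wrapped context is done. Typical use drains ticker.C until the operation succeeds; a hedged sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/cenk/backoff"
)

func doWork(attempt int) error {
	if attempt < 3 {
		return errors.New("not ready yet")
	}
	return nil
}

func main() {
	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
	defer ticker.Stop()

	attempts := 0
	// Each tick arrives after the next backoff interval.
	for range ticker.C {
		attempts++
		if err := doWork(attempts); err != nil {
			fmt.Println("retrying after:", err)
			continue
		}
		fmt.Println("succeeded on attempt", attempts)
		break
	}
}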

View file

@ -1,29 +1,80 @@
package negroni
import (
"bytes"
"log"
"net/http"
"os"
"text/template"
"time"
)
// LoggerEntry is the structure
// passed to the template.
type LoggerEntry struct {
StartTime string
Status int
Duration time.Duration
Hostname string
Method string
Path string
}
// LoggerDefaultFormat is the format
// logged used by the default Logger instance.
var LoggerDefaultFormat = "{{.StartTime}} | {{.Status}} | \t {{.Duration}} | {{.Hostname}} | {{.Method}} {{.Path}} \n"
// LoggerDefaultDateFormat is the
// format used for date by the
// default Logger instance.
var LoggerDefaultDateFormat = time.RFC3339
// ALogger interface
type ALogger interface {
Println(v ...interface{})
Printf(format string, v ...interface{})
}
// Logger is a middleware handler that logs the request as it goes in and the response as it goes out.
type Logger struct {
// Logger inherits from log.Logger used to log messages with the Logger middleware
*log.Logger
// ALogger implements just enough log.Logger interface to be compatible with other implementations
ALogger
dateFormat string
template *template.Template
}
// NewLogger returns a new Logger instance
func NewLogger() *Logger {
return &Logger{log.New(os.Stdout, "[negroni] ", 0)}
logger := &Logger{ALogger: log.New(os.Stdout, "[negroni] ", 0), dateFormat: LoggerDefaultDateFormat}
logger.SetFormat(LoggerDefaultFormat)
return logger
}
func (l *Logger) SetFormat(format string) {
l.template = template.Must(template.New("negroni_parser").Parse(format))
}
func (l *Logger) SetDateFormat(format string) {
l.dateFormat = format
}
func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {
start := time.Now()
l.Printf("Started %s %s", r.Method, r.URL.Path)
next(rw, r)
res := rw.(ResponseWriter)
l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start))
log := LoggerEntry{
StartTime: start.Format(l.dateFormat),
Status: res.Status(),
Duration: time.Since(start),
Hostname: r.Host,
Method: r.Method,
Path: r.URL.Path,
}
buff := &bytes.Buffer{}
l.template.Execute(buff, log)
l.Printf(buff.String())
}
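The rewritten Logger renders each request through a text/template, so both the line format and the date layout are configurable. A hedged wiring sketch, assuming the github.com/codegangsta/negroni import path in use at the time:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/codegangsta/negroni"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	logger := negroni.NewLogger()
	// The fields available to the template are those of LoggerEntry above.
	logger.SetFormat("{{.Status}} {{.Method}} {{.Path}} took {{.Duration}}")
	logger.SetDateFormat(time.RFC1123)

	n := negroni.New(negroni.NewRecovery(), logger)
	n.UseHandler(mux)
	http.ListenAndServe(":3000", n)
}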

View file

@ -59,6 +59,14 @@ func New(handlers ...Handler) *Negroni {
}
}
// With returns a new Negroni instance that is a combination of the negroni
// receiver's handlers and the provided handlers.
func (n *Negroni) With(handlers ...Handler) *Negroni {
return New(
append(n.handlers, handlers...)...,
)
}
// Classic returns a new Negroni instance with the default middleware already
// in the stack.
//

View file

@ -11,7 +11,7 @@ import (
// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one.
type Recovery struct {
Logger *log.Logger
Logger ALogger
PrintStack bool
ErrorHandlerFunc func(interface{})
StackAll bool

View file

@ -29,9 +29,15 @@ type beforeFunc func(ResponseWriter)
// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter
func NewResponseWriter(rw http.ResponseWriter) ResponseWriter {
return &responseWriter{
nrw := &responseWriter{
ResponseWriter: rw,
}
if _, ok := rw.(http.CloseNotifier); ok {
return &responseWriterCloseNotifer{nrw}
}
return nrw
}
type responseWriter struct {
@ -81,10 +87,6 @@ func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return hijacker.Hijack()
}
func (rw *responseWriter) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (rw *responseWriter) callBefore() {
for i := len(rw.beforeFuncs) - 1; i >= 0; i-- {
rw.beforeFuncs[i](rw)
@ -94,6 +96,18 @@ func (rw *responseWriter) callBefore() {
func (rw *responseWriter) Flush() {
flusher, ok := rw.ResponseWriter.(http.Flusher)
if ok {
if !rw.Written() {
// The status will be StatusOK if WriteHeader has not been called yet
rw.WriteHeader(http.StatusOK)
}
flusher.Flush()
}
}
type responseWriterCloseNotifer struct {
*responseWriter
}
func (rw *responseWriterCloseNotifer) CloseNotify() <-chan bool {
return rw.ResponseWriter.(http.CloseNotifier).CloseNotify()
}

63 additions: vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go (generated, vendored, new file)
View file

@ -0,0 +1,63 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package fileutil implements utility functions related to files and paths.
package fileutil
import (
"io/ioutil"
"os"
"path"
"sort"
"github.com/coreos/pkg/capnslog"
)
const (
privateFileMode = 0600
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil")
)
// IsDirWriteable checks if dir is writable by writing and removing a file
// to dir. It returns nil if dir is writable.
func IsDirWriteable(dir string) error {
f := path.Join(dir, ".touch")
if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil {
return err
}
return os.Remove(f)
}
// ReadDir returns the filenames in the given directory in sorted order.
func ReadDir(dirpath string) ([]string, error) {
dir, err := os.Open(dirpath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(-1)
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
func Exist(name string) bool {
_, err := os.Stat(name)
return err == nil
}
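A short usage sketch for the two helpers above, assuming the github.com/coreos/etcd/pkg/fileutil import path:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	dir := "/tmp"

	// Probes writability by creating and removing a ".touch" file.
	if err := fileutil.IsDirWriteable(dir); err != nil {
		log.Fatalf("%s is not writable: %v", dir, err)
	}

	// ReadDir returns the directory entries already sorted.
	names, err := fileutil.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(names), "entries in", dir)
}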

View file

@ -0,0 +1,90 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"errors"
"os"
"syscall"
"time"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fname string
file *os.File
}
func (l *lock) Name() string {
return l.fname
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
f, err := os.Open(l.fname)
if err != nil {
return ErrLocked
}
l.file = f
return nil
}
// Lock acquires exclusivity on the lock with blocking
func (l *lock) Lock() error {
err := os.Chmod(l.fname, syscall.DMEXCL|0600)
if err != nil {
return err
}
for {
f, err := os.Open(l.fname)
if err == nil {
l.file = f
return nil
}
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return l.file.Close()
}
func (l *lock) Destroy() error {
return nil
}
func NewLock(file string) (Lock, error) {
l := &lock{fname: file}
return l, nil
}

View file

@ -0,0 +1,98 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_WRLCK
lock.Whence = 0
lock.Pid = 0
return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock)
if err != nil && err == syscall.EAGAIN {
return ErrLocked
}
return err
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.OpenFile(file, os.O_WRONLY, 0600)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

View file

@ -0,0 +1,76 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !windows,!plan9,!solaris
package fileutil
import (
"errors"
"os"
"syscall"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
if err != nil && err == syscall.EWOULDBLOCK {
return ErrLocked
}
return err
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
return syscall.Flock(l.fd, syscall.LOCK_EX)
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return syscall.Flock(l.fd, syscall.LOCK_UN)
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}
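All four platform files implement the same Lock interface; the Unix variant above is backed by flock(2). A hedged usage sketch (note that NewLock opens an existing file, so the lock file must already exist):

package main

import (
	"log"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	l, err := fileutil.NewLock("/tmp/example.lock") // file must already exist
	if err != nil {
		log.Fatal(err)
	}
	defer l.Destroy()

	// TryLock fails fast with ErrLocked if another process holds the lock.
	if err := l.TryLock(); err == fileutil.ErrLocked {
		log.Fatal("another process owns the lock")
	} else if err != nil {
		log.Fatal(err)
	}
	defer l.Unlock()

	// ... exclusive work goes here ...
}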

View file

@ -0,0 +1,71 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build windows
package fileutil
import (
"errors"
"os"
)
var (
ErrLocked = errors.New("file already locked")
)
type Lock interface {
Name() string
TryLock() error
Lock() error
Unlock() error
Destroy() error
}
type lock struct {
fd int
file *os.File
}
func (l *lock) Name() string {
return l.file.Name()
}
// TryLock acquires exclusivity on the lock without blocking
func (l *lock) TryLock() error {
return nil
}
// Lock acquires exclusivity on the lock without blocking
func (l *lock) Lock() error {
return nil
}
// Unlock unlocks the lock
func (l *lock) Unlock() error {
return nil
}
func (l *lock) Destroy() error {
return l.file.Close()
}
func NewLock(file string) (Lock, error) {
f, err := os.Open(file)
if err != nil {
return nil, err
}
l := &lock{int(f.Fd()), f}
return l, nil
}

View file

@ -0,0 +1,28 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !linux
package fileutil
import "os"
// Preallocate tries to allocate the space for given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
return nil
}

View file

@ -0,0 +1,42 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
package fileutil
import (
"os"
"syscall"
)
// Preallocate tries to allocate the space for given
// file. This operation is only supported on linux by a
// few filesystems (btrfs, ext4, etc.).
// If the operation is unsupported, no error will be returned.
// Otherwise, the error encountered will be returned.
func Preallocate(f *os.File, sizeInBytes int) error {
// use mode = 1 to keep size
// see FALLOC_FL_KEEP_SIZE
err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes))
if err != nil {
errno, ok := err.(syscall.Errno)
// treat not support as nil error
if ok && errno == syscall.ENOTSUP {
return nil
}
return err
}
return nil
}
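Preallocate is a best-effort hint: on Linux it issues fallocate with the keep-size flag and treats ENOTSUP as success, and on other platforms it is a no-op. A small sketch:

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	f, err := os.Create("/tmp/wal.tmp")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Reserve 64 MB up front so later appends are less likely to fragment
	// or hit ENOSPC mid-write; the visible file size is left unchanged.
	if err := fileutil.Preallocate(f, 64*1024*1024); err != nil {
		log.Fatal(err)
	}
}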

80 additions: vendor/github.com/coreos/etcd/pkg/fileutil/purge.go (generated, vendored, new file)
View file

@ -0,0 +1,80 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fileutil
import (
"os"
"path"
"sort"
"strings"
"time"
)
func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
errC := make(chan error, 1)
go func() {
for {
fnames, err := ReadDir(dirname)
if err != nil {
errC <- err
return
}
newfnames := make([]string, 0)
for _, fname := range fnames {
if strings.HasSuffix(fname, suffix) {
newfnames = append(newfnames, fname)
}
}
sort.Strings(newfnames)
for len(newfnames) > int(max) {
f := path.Join(dirname, newfnames[0])
l, err := NewLock(f)
if err != nil {
errC <- err
return
}
err = l.TryLock()
if err != nil {
break
}
err = os.Remove(f)
if err != nil {
errC <- err
return
}
err = l.Unlock()
if err != nil {
plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
err = l.Destroy()
if err != nil {
plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err)
errC <- err
return
}
plog.Infof("purged file %s successfully", f)
newfnames = newfnames[1:]
}
select {
case <-time.After(interval):
case <-stop:
return
}
}
}()
return errC
}
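PurgeFile runs a background loop that keeps at most max files with the given suffix in a directory, skipping files another process still holds locked, and reports failures on the returned channel. A hedged sketch:

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil"
)

func main() {
	stop := make(chan struct{})
	defer close(stop)

	// Keep at most 5 "*.snap" files in the directory, checking every 30s
	// until stop is closed.
	errc := fileutil.PurgeFile("/var/lib/etcd/member/snap", "snap", 5, 30*time.Second, stop)

	// A real program would select on errc alongside its shutdown signal;
	// here we simply block until the purge loop reports an error.
	if err := <-errc; err != nil {
		log.Fatal(err)
	}
}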

91 additions: vendor/github.com/coreos/etcd/version/version.go (generated, vendored, new file)
View file

@ -0,0 +1,91 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version implements etcd version parsing and contains latest version
// information.
package version
import (
"fmt"
"os"
"path"
"strings"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/types"
)
var (
// MinClusterVersion is the min cluster version this etcd binary is compatible with.
MinClusterVersion = "2.2.0"
Version = "2.3.0-alpha.0+git"
// Git SHA Value will be set during build
GitSHA = "Not provided (use ./build instead of go build)"
)
// WalVersion is an enum for versions of etcd logs.
type DataDirVersion string
const (
DataDirUnknown DataDirVersion = "Unknown WAL"
DataDir2_0 DataDirVersion = "2.0.0"
DataDir2_0Proxy DataDirVersion = "2.0 proxy"
DataDir2_0_1 DataDirVersion = "2.0.1"
)
type Versions struct {
Server string `json:"etcdserver"`
Cluster string `json:"etcdcluster"`
// TODO: raft state machine version
}
func DetectDataDir(dirpath string) (DataDirVersion, error) {
names, err := fileutil.ReadDir(dirpath)
if err != nil {
if os.IsNotExist(err) {
err = nil
}
// Error reading the directory
return DataDirUnknown, err
}
nameSet := types.NewUnsafeSet(names...)
if nameSet.Contains("member") {
ver, err := DetectDataDir(path.Join(dirpath, "member"))
if ver == DataDir2_0 {
return DataDir2_0_1, nil
}
return ver, err
}
if nameSet.ContainsAll([]string{"snap", "wal"}) {
// .../wal cannot be empty to exist.
walnames, err := fileutil.ReadDir(path.Join(dirpath, "wal"))
if err == nil && len(walnames) > 0 {
return DataDir2_0, nil
}
}
if nameSet.ContainsAll([]string{"proxy"}) {
return DataDir2_0Proxy, nil
}
return DataDirUnknown, nil
}
// Cluster only keeps the major.minor.
func Cluster(v string) string {
vs := strings.Split(v, ".")
if len(vs) <= 2 {
return v
}
return fmt.Sprintf("%s.%s", vs[0], vs[1])
}
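Cluster trims a semantic version down to major.minor, which is how etcd compares a member's version against MinClusterVersion. A tiny sketch (assumed import path github.com/coreos/etcd/version):

package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Cluster("2.3.7+git")) // "2.3"
	fmt.Println(version.Cluster("2.2"))       // already major.minor: "2.2"
	fmt.Println(version.Cluster(version.Version))
}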

View file

@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer

View file

@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes)
// Display pointer information.
if len(pointerChain) > 0 {
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || valueCap != 0 {
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if valueCap != 0 {
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
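The two new ConfigState flags make dumps reproducible across runs, which is what you want when diffing them in tests. A hedged sketch, assuming the usual github.com/davecgh/go-spew/spew import path:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type node struct {
	Name     string
	Children []*node
}

func main() {
	v := &node{Name: "root", Children: []*node{{Name: "leaf"}}}

	// Pointer addresses and slice capacities differ between runs, so
	// disable both to get output that can be compared verbatim.
	cs := spew.ConfigState{
		Indent:                  "  ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}
	fmt.Print(cs.Sdump(v))
}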

View file

@ -1,27 +0,0 @@
Copyright (c) 2015, David Deng
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of go-colortext nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -1,47 +0,0 @@
/*
ct package provides functions to change the color of console text.
Under windows platform, the Console api is used. Under other systems, ANSI text mode is used.
*/
package ct
// Color is the type of color to be set.
type Color int
const (
// No change of color
None = Color(iota)
Black
Red
Green
Yellow
Blue
Magenta
Cyan
White
)
/*
ResetColor resets the foreground and background to original colors
*/
func ResetColor() {
resetColor()
}
// ChangeColor sets the foreground and background colors. If the value of the color is None,
// the corresponding color keeps unchanged.
// If fgBright or bgBright is set true, corresponding color use bright color. bgBright may be
// ignored in some OS environment.
func ChangeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
changeColor(fg, fgBright, bg, bgBright)
}
// Foreground changes the foreground color.
func Foreground(cl Color, bright bool) {
ChangeColor(cl, bright, None, false)
}
// Background changes the background color.
func Background(cl Color, bright bool) {
ChangeColor(None, false, cl, bright)
}

View file

@ -1,35 +0,0 @@
// +build !windows
package ct
import (
"fmt"
)
func resetColor() {
fmt.Print("\x1b[0m")
}
func changeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
if fg == None && bg == None {
return
} // if
s := ""
if fg != None {
s = fmt.Sprintf("%s%d", s, 30+(int)(fg-Black))
if fgBright {
s += ";1"
} // if
} // if
if bg != None {
if s != "" {
s += ";"
} // if
s = fmt.Sprintf("%s%d", s, 40+(int)(bg-Black))
} // if
s = "\x1b[0;" + s + "m"
fmt.Print(s)
}

View file

@ -1,139 +0,0 @@
// +build windows
package ct
import (
"syscall"
"unsafe"
)
var fg_colors = []uint16{
0,
0,
foreground_red,
foreground_green,
foreground_red | foreground_green,
foreground_blue,
foreground_red | foreground_blue,
foreground_green | foreground_blue,
foreground_red | foreground_green | foreground_blue}
var bg_colors = []uint16{
0,
0,
background_red,
background_green,
background_red | background_green,
background_blue,
background_red | background_blue,
background_green | background_blue,
background_red | background_green | background_blue}
const (
foreground_blue = uint16(0x0001)
foreground_green = uint16(0x0002)
foreground_red = uint16(0x0004)
foreground_intensity = uint16(0x0008)
background_blue = uint16(0x0010)
background_green = uint16(0x0020)
background_red = uint16(0x0040)
background_intensity = uint16(0x0080)
foreground_mask = foreground_blue | foreground_green | foreground_red | foreground_intensity
background_mask = background_blue | background_green | background_red | background_intensity
)
var (
kernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
hStdout uintptr
initScreenInfo *console_screen_buffer_info
)
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
ret, _, _ := procSetConsoleTextAttribute.Call(
hConsoleOutput,
uintptr(wAttributes))
return ret != 0
}
type coord struct {
X, Y int16
}
type small_rect struct {
Left, Top, Right, Bottom int16
}
type console_screen_buffer_info struct {
DwSize coord
DwCursorPosition coord
WAttributes uint16
SrWindow small_rect
DwMaximumWindowSize coord
}
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *console_screen_buffer_info {
var csbi console_screen_buffer_info
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
hConsoleOutput,
uintptr(unsafe.Pointer(&csbi)))
if ret == 0 {
return nil
}
return &csbi
}
const (
std_output_handle = uint32(-11 & 0xFFFFFFFF)
)
func init() {
kernel32 := syscall.NewLazyDLL("kernel32.dll")
procGetStdHandle = kernel32.NewProc("GetStdHandle")
hStdout, _, _ = procGetStdHandle.Call(uintptr(std_output_handle))
initScreenInfo = getConsoleScreenBufferInfo(hStdout)
syscall.LoadDLL("")
}
func resetColor() {
if initScreenInfo == nil { // No console info - Ex: stdout redirection
return
}
setConsoleTextAttribute(hStdout, initScreenInfo.WAttributes)
}
func changeColor(fg Color, fgBright bool, bg Color, bgBright bool) {
attr := uint16(0)
if fg == None || bg == None {
cbufinfo := getConsoleScreenBufferInfo(hStdout)
if cbufinfo == nil { // No console info - Ex: stdout redirection
return
}
attr = getConsoleScreenBufferInfo(hStdout).WAttributes
} // if
if fg != None {
attr = attr & ^foreground_mask | fg_colors[fg]
if fgBright {
attr |= foreground_intensity
} // if
} // if
if bg != None {
attr = attr & ^background_mask | bg_colors[bg]
if bgBright {
attr |= background_intensity
} // if
} // if
setConsoleTextAttribute(hStdout, attr)
}

21 additions: vendor/github.com/decker502/dnspod-go/LICENSE (generated, vendored, new file)
View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017 decker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -51,9 +51,13 @@ func (e ValidationError) Error() string {
} else {
return "token is invalid"
}
return e.Inner.Error()
}
// No errors
func (e *ValidationError) valid() bool {
return e.Errors == 0
if e.Errors > 0 {
return false
}
return true
}

View file

@ -8,9 +8,8 @@ import (
)
type Parser struct {
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
SkipClaimsValidation bool // Skip claims validation during token parsing
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
}
// Parse, validate, and return a token.
@ -102,16 +101,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf
vErr := &ValidationError{}
// Validate Claims
if !p.SkipClaimsValidation {
if err := token.Claims.Valid(); err != nil {
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
}
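SkipClaimsValidation lets a caller verify the signature while deferring claim checks such as expiry, e.g. to inspect an already-expired token. A hedged sketch, assuming the github.com/dgrijalva/jwt-go import path:

package main

import (
	"fmt"
	"log"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("secret")

	// Sign a token whose "exp" claim is already in the past.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"exp": 0}).SignedString(key)
	if err != nil {
		log.Fatal(err)
	}

	parser := &jwt.Parser{
		ValidMethods:         []string{"HS256"},
		SkipClaimsValidation: true, // verify the signature, ignore exp/nbf/iat
	}
	token, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature valid despite expiry:", token.Valid)
}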

View file

@ -59,6 +59,7 @@ func New(endpoints []string, options *store.Config) (store.Store, error) {
db *bolt.DB
err error
boltOptions *bolt.Options
timeout = transientTimeout
)
if len(endpoints) > 1 {
@ -82,11 +83,15 @@ func New(endpoints []string, options *store.Config) (store.Store, error) {
}
}
if options.ConnectionTimeout != 0 {
timeout = options.ConnectionTimeout
}
b := &BoltDB{
client: db,
path: endpoints[0],
boltBucket: []byte(options.Bucket),
timeout: transientTimeout,
timeout: timeout,
PersistConnection: options.PersistConnection,
}
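The boltdb store now honors store.Config.ConnectionTimeout instead of always using the transient default, bounding how long New waits on the bolt file lock held by another process. A hedged sketch using the constructor directly (assumed import paths github.com/docker/libkv/store and github.com/docker/libkv/store/boltdb):

package main

import (
	"log"
	"time"

	"github.com/docker/libkv/store"
	"github.com/docker/libkv/store/boltdb"
)

func main() {
	kv, err := boltdb.New([]string{"/tmp/traefik.db"}, &store.Config{
		Bucket:            "traefik",
		ConnectionTimeout: 3 * time.Second, // give up on the file lock after 3s
		PersistConnection: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer kv.Close()

	if err := kv.Put("key", []byte("value"), nil); err != nil {
		log.Fatal(err)
	}
}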

View file

@ -252,7 +252,7 @@ func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) {
pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key))
if err != nil {
// If node is not found: List is out of date, retry
if err == zk.ErrNoNode {
if err == store.ErrKeyNotFound {
return s.List(directory)
}
return nil, err

View file

@ -9,6 +9,7 @@ import (
"os"
"path"
"path/filepath"
"strings"
"time"
)
@ -145,14 +146,22 @@ func (fs *AssetFS) Open(name string) (http.File, error) {
}
if b, err := fs.Asset(name); err == nil {
timestamp := defaultFileTimestamp
if info, err := fs.AssetInfo(name); err == nil {
timestamp = info.ModTime()
if fs.AssetInfo != nil {
if info, err := fs.AssetInfo(name); err == nil {
timestamp = info.ModTime()
}
}
return NewAssetFile(name, b, timestamp), nil
}
if children, err := fs.AssetDir(name); err == nil {
return NewAssetDirectory(name, children, fs), nil
} else {
// If the error is not found, return an error that will
// result in a 404 error. Otherwise the server returns
// a 500 error for files not found.
if strings.Contains(err.Error(), "not found") {
return nil, os.ErrNotExist
}
return nil, err
}
}

20 additions: vendor/github.com/fatih/color/LICENSE.md (generated, vendored, new file)
View file

@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Some files were not shown because too many files have changed in this diff.